diff --git a/API/APICallbackFunction.h b/API/APICallbackFunction.h
new file mode 100644
index 0000000..e5283b5
--- /dev/null
+++ b/API/APICallbackFunction.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef APICallbackFunction_h
+#define APICallbackFunction_h
+
+#include "APICast.h"
+#include "Error.h"
+#include "JSCallbackConstructor.h"
+#include "JSLock.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct APICallbackFunction {
+
+template <typename T> static EncodedJSValue JSC_HOST_CALL call(ExecState*);
+template <typename T> static EncodedJSValue JSC_HOST_CALL construct(ExecState*);
+
+};
+
+template <typename T>
+EncodedJSValue JSC_HOST_CALL APICallbackFunction::call(ExecState* exec)
+{
+    VM& vm = exec->vm();
+    auto scope = DECLARE_THROW_SCOPE(vm);
+    JSContextRef execRef = toRef(exec);
+    JSObjectRef functionRef = toRef(exec->jsCallee());
+    JSObjectRef thisObjRef = toRef(jsCast<JSObject*>(exec->thisValue().toThis(exec, NotStrictMode)));
+
+    int argumentCount = static_cast<int>(exec->argumentCount());
+    Vector<JSValueRef, 16> arguments;
+    arguments.reserveInitialCapacity(argumentCount);
+    for (int i = 0; i < argumentCount; i++)
+        arguments.uncheckedAppend(toRef(exec, exec->uncheckedArgument(i)));
+
+    JSValueRef exception = 0;
+    JSValueRef result;
+    {
+        JSLock::DropAllLocks dropAllLocks(exec);
+        result = jsCast<T*>(toJS(functionRef))->functionCallback()(execRef, functionRef, thisObjRef, argumentCount, arguments.data(), &exception);
+    }
+    if (exception)
+        throwException(exec, scope, toJS(exec, exception));
+
+    // result must be a valid JSValue.
+    if (!result)
+        return JSValue::encode(jsUndefined());
+
+    return JSValue::encode(toJS(exec, result));
+}
+
+template <typename T>
+EncodedJSValue JSC_HOST_CALL APICallbackFunction::construct(ExecState* exec)
+{
+    VM& vm = exec->vm();
+    auto scope = DECLARE_THROW_SCOPE(vm);
+    JSObject* constructor = exec->jsCallee();
+    JSContextRef ctx = toRef(exec);
+    JSObjectRef constructorRef = toRef(constructor);
+
+    JSObjectCallAsConstructorCallback callback = jsCast<T*>(constructor)->constructCallback();
+    if (callback) {
+        size_t argumentCount = exec->argumentCount();
+        Vector<JSValueRef, 16> arguments;
+        arguments.reserveInitialCapacity(argumentCount);
+        for (size_t i = 0; i < argumentCount; ++i)
+            arguments.uncheckedAppend(toRef(exec, exec->uncheckedArgument(i)));
+
+        JSValueRef exception = 0;
+        JSObjectRef result;
+        {
+            JSLock::DropAllLocks dropAllLocks(exec);
+            result = callback(ctx, constructorRef, argumentCount, arguments.data(), &exception);
+        }
+        if (exception) {
+            throwException(exec, scope, toJS(exec, exception));
+            return JSValue::encode(toJS(exec, exception));
+        }
+        // result must be a valid JSValue.
+        if (!result)
+            return throwVMTypeError(exec, scope);
+        return JSValue::encode(toJS(result));
+    }
+
+    return JSValue::encode(toJS(JSObjectMake(ctx, jsCast<JSCallbackConstructor*>(constructor)->classRef(), 0)));
+}
+
+} // namespace JSC
+
+#endif // APICallbackFunction_h
diff --git a/API/APICast.h b/API/APICast.h
new file mode 100644
index 0000000..8fe8d60
--- /dev/null
+++ b/API/APICast.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2006 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
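Note (not part of the patch): APICallbackFunction::call above is the native thunk installed by JSCallbackFunction and class-based callback objects; it converts the callee, this value, and arguments to C API types, drops the JSLock around the client callback, and rethrows any exception the client reported. A minimal caller-side sketch of the path it services, using only public JavaScriptCore C API calls (the function name "add" and the script are illustrative):

// Sketch: a native function exposed through the C API. When script calls it,
// the VM routes through APICallbackFunction::call into this callback.
#include <JavaScriptCore/JavaScriptCore.h>

static JSValueRef addCallback(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject,
                              size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
{
    (void)function; (void)thisObject;
    double sum = 0;
    for (size_t i = 0; i < argumentCount; ++i)
        sum += JSValueToNumber(ctx, arguments[i], exception); // may set *exception
    return JSValueMakeNumber(ctx, sum);
}

int main(void)
{
    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);
    JSStringRef name = JSStringCreateWithUTF8CString("add");
    JSObjectRef fn = JSObjectMakeFunctionWithCallback(ctx, name, addCallback); // backed by JSCallbackFunction
    JSObjectSetProperty(ctx, JSContextGetGlobalObject(ctx), name, fn, kJSPropertyAttributeNone, NULL);

    JSStringRef script = JSStringCreateWithUTF8CString("add(1, 2, 3)");
    JSValueRef result = JSEvaluateScript(ctx, script, NULL, NULL, 1, NULL);
    // JSValueToNumber(ctx, result, NULL) evaluates to 6 here.
    (void)result;

    JSStringRelease(script);
    JSStringRelease(name);
    JSGlobalContextRelease(ctx);
    return 0;
}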
+ */ + +#ifndef APICast_h +#define APICast_h + +#include "JSAPIValueWrapper.h" +#include "JSCJSValue.h" +#include "JSCJSValueInlines.h" +#include "JSGlobalObject.h" + +namespace JSC { + class ExecState; + class PropertyNameArray; + class VM; + class JSObject; + class JSValue; +} + +typedef const struct OpaqueJSContextGroup* JSContextGroupRef; +typedef const struct OpaqueJSContext* JSContextRef; +typedef struct OpaqueJSContext* JSGlobalContextRef; +typedef struct OpaqueJSPropertyNameAccumulator* JSPropertyNameAccumulatorRef; +typedef const struct OpaqueJSValue* JSValueRef; +typedef struct OpaqueJSValue* JSObjectRef; + +/* Opaque typing convenience methods */ + +inline JSC::ExecState* toJS(JSContextRef c) +{ + ASSERT(c); + return reinterpret_cast(const_cast(c)); +} + +inline JSC::ExecState* toJS(JSGlobalContextRef c) +{ + ASSERT(c); + return reinterpret_cast(c); +} + +inline JSC::JSValue toJS(JSC::ExecState* exec, JSValueRef v) +{ + ASSERT_UNUSED(exec, exec); +#if USE(JSVALUE32_64) + JSC::JSCell* jsCell = reinterpret_cast(const_cast(v)); + if (!jsCell) + return JSC::jsNull(); + JSC::JSValue result; + if (jsCell->isAPIValueWrapper()) + result = JSC::jsCast(jsCell)->value(); + else + result = jsCell; +#else + JSC::JSValue result = JSC::JSValue::decode(reinterpret_cast(const_cast(v))); +#endif + if (!result) + return JSC::jsNull(); + if (result.isCell()) + RELEASE_ASSERT(result.asCell()->methodTable()); + return result; +} + +inline JSC::JSValue toJSForGC(JSC::ExecState* exec, JSValueRef v) +{ + ASSERT_UNUSED(exec, exec); +#if USE(JSVALUE32_64) + JSC::JSCell* jsCell = reinterpret_cast(const_cast(v)); + if (!jsCell) + return JSC::JSValue(); + JSC::JSValue result = jsCell; +#else + JSC::JSValue result = JSC::JSValue::decode(reinterpret_cast(const_cast(v))); +#endif + if (result && result.isCell()) + RELEASE_ASSERT(result.asCell()->methodTable()); + return result; +} + +// Used in JSObjectGetPrivate as that may be called during finalization +inline JSC::JSObject* uncheckedToJS(JSObjectRef o) +{ + return reinterpret_cast(o); +} + +inline JSC::JSObject* toJS(JSObjectRef o) +{ + JSC::JSObject* object = uncheckedToJS(o); + if (object) + RELEASE_ASSERT(object->methodTable()); + return object; +} + +inline JSC::PropertyNameArray* toJS(JSPropertyNameAccumulatorRef a) +{ + return reinterpret_cast(a); +} + +inline JSC::VM* toJS(JSContextGroupRef g) +{ + return reinterpret_cast(const_cast(g)); +} + +inline JSValueRef toRef(JSC::ExecState* exec, JSC::JSValue v) +{ + ASSERT(exec->vm().currentThreadIsHoldingAPILock()); +#if USE(JSVALUE32_64) + if (!v) + return 0; + if (!v.isCell()) + return reinterpret_cast(JSC::jsAPIValueWrapper(exec, v).asCell()); + return reinterpret_cast(v.asCell()); +#else + UNUSED_PARAM(exec); + return reinterpret_cast(JSC::JSValue::encode(v)); +#endif +} + +inline JSObjectRef toRef(JSC::JSObject* o) +{ + return reinterpret_cast(o); +} + +inline JSObjectRef toRef(const JSC::JSObject* o) +{ + return reinterpret_cast(const_cast(o)); +} + +inline JSContextRef toRef(JSC::ExecState* e) +{ + return reinterpret_cast(e); +} + +inline JSGlobalContextRef toGlobalRef(JSC::ExecState* e) +{ + ASSERT(e == e->lexicalGlobalObject()->globalExec()); + return reinterpret_cast(e); +} + +inline JSPropertyNameAccumulatorRef toRef(JSC::PropertyNameArray* l) +{ + return reinterpret_cast(l); +} + +inline JSContextGroupRef toRef(JSC::VM* g) +{ + return reinterpret_cast(g); +} + +#endif // APICast_h diff --git a/API/APIUtils.h b/API/APIUtils.h new file mode 100644 index 0000000..e2190c8 --- /dev/null +++ 
b/API/APIUtils.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef APIUtils_h +#define APIUtils_h + +#include "Exception.h" +#include "JSCJSValue.h" +#include "JSGlobalObjectInspectorController.h" +#include "JSValueRef.h" + +enum class ExceptionStatus { + DidThrow, + DidNotThrow +}; + +inline ExceptionStatus handleExceptionIfNeeded(JSC::ExecState* exec, JSValueRef* returnedExceptionRef) +{ + JSC::VM& vm = exec->vm(); + auto scope = DECLARE_CATCH_SCOPE(vm); + if (UNLIKELY(scope.exception())) { + JSC::Exception* exception = scope.exception(); + if (returnedExceptionRef) + *returnedExceptionRef = toRef(exec, exception->value()); + scope.clearException(); +#if ENABLE(REMOTE_INSPECTOR) + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, exception); +#endif + return ExceptionStatus::DidThrow; + } + return ExceptionStatus::DidNotThrow; +} + +inline void setException(JSC::ExecState* exec, JSValueRef* returnedExceptionRef, JSC::JSValue exception) +{ + if (returnedExceptionRef) + *returnedExceptionRef = toRef(exec, exception); +#if ENABLE(REMOTE_INSPECTOR) + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, JSC::Exception::create(exec->vm(), exception)); +#endif +} + +#endif /* APIUtils_h */ diff --git a/API/JSAPIWrapperObject.h b/API/JSAPIWrapperObject.h new file mode 100644 index 0000000..14194b6 --- /dev/null +++ b/API/JSAPIWrapperObject.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSAPIWrapperObject_h +#define JSAPIWrapperObject_h + +#include "JSBase.h" +#include "JSDestructibleObject.h" +#include "WeakReferenceHarvester.h" + +#if JSC_OBJC_API_ENABLED + +namespace JSC { + +class JSAPIWrapperObject : public JSDestructibleObject { +public: + typedef JSDestructibleObject Base; + + void finishCreation(VM&); + static void visitChildren(JSCell*, JSC::SlotVisitor&); + + void* wrappedObject() { return m_wrappedObject; } + void setWrappedObject(void*); + +protected: + JSAPIWrapperObject(VM&, Structure*); + +private: + void* m_wrappedObject; +}; + +} // namespace JSC + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSAPIWrapperObject_h diff --git a/API/JSAPIWrapperObject.mm b/API/JSAPIWrapperObject.mm new file mode 100644 index 0000000..f301d3e --- /dev/null +++ b/API/JSAPIWrapperObject.mm @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
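Note (not part of the patch): handleExceptionIfNeeded() and setException() in APIUtils.h above are the helpers the C API entry points use to surface a pending VM exception through the JSValueRef* out-parameter (and to report it to the remote inspector when enabled); JSEvaluateScript in JSBase.cpp later in this patch does the same by hand for evaluation exceptions. From the caller's side that contract looks like the following sketch; the helper name describeException and the failing script are illustrative:

// Sketch: consuming the exception out-parameter that setException()/
// handleExceptionIfNeeded() populate on the engine side.
#include <JavaScriptCore/JavaScriptCore.h>
#include <stdio.h>
#include <stdlib.h>

static void describeException(JSContextRef ctx, JSValueRef exception)
{
    JSStringRef message = JSValueToStringCopy(ctx, exception, NULL);
    size_t bufferSize = JSStringGetMaximumUTF8CStringSize(message);
    char* buffer = (char*)malloc(bufferSize);
    JSStringGetUTF8CString(message, buffer, bufferSize);
    fprintf(stderr, "uncaught exception: %s\n", buffer);
    free(buffer);
    JSStringRelease(message);
}

int main(void)
{
    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);
    JSStringRef script = JSStringCreateWithUTF8CString("nonexistentFunction()");

    JSValueRef exception = NULL; // filled in by the engine if evaluation throws
    JSValueRef result = JSEvaluateScript(ctx, script, NULL, NULL, 1, &exception);
    if (!result && exception)
        describeException(ctx, exception); // e.g. a ReferenceError message

    JSStringRelease(script);
    JSGlobalContextRelease(ctx);
    return 0;
}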
+ */ + +#include "config.h" +#include "JSAPIWrapperObject.h" + +#include "JSCInlines.h" +#include "JSCallbackObject.h" +#include "JSVirtualMachineInternal.h" +#include "Structure.h" +#include + +#if JSC_OBJC_API_ENABLED + +class JSAPIWrapperObjectHandleOwner : public JSC::WeakHandleOwner { +public: + void finalize(JSC::Handle, void*) override; + bool isReachableFromOpaqueRoots(JSC::Handle, void* context, JSC::SlotVisitor&) override; +}; + +static JSAPIWrapperObjectHandleOwner* jsAPIWrapperObjectHandleOwner() +{ + static NeverDestroyed jsWrapperObjectHandleOwner; + return &jsWrapperObjectHandleOwner.get(); +} + +void JSAPIWrapperObjectHandleOwner::finalize(JSC::Handle handle, void*) +{ + JSC::JSAPIWrapperObject* wrapperObject = static_cast(handle.get().asCell()); + if (!wrapperObject->wrappedObject()) + return; + + JSC::Heap::heap(wrapperObject)->releaseSoon(adoptNS(static_cast(wrapperObject->wrappedObject()))); + JSC::WeakSet::deallocate(JSC::WeakImpl::asWeakImpl(handle.slot())); +} + +bool JSAPIWrapperObjectHandleOwner::isReachableFromOpaqueRoots(JSC::Handle handle, void*, JSC::SlotVisitor& visitor) +{ + JSC::JSAPIWrapperObject* wrapperObject = JSC::jsCast(handle.get().asCell()); + // We use the JSGlobalObject when processing weak handles to prevent the situation where using + // the same Objective-C object in multiple global objects keeps all of the global objects alive. + if (!wrapperObject->wrappedObject()) + return false; + return JSC::Heap::isMarked(wrapperObject->structure()->globalObject()) && visitor.containsOpaqueRoot(wrapperObject->wrappedObject()); +} + +namespace JSC { + +template <> const ClassInfo JSCallbackObject::s_info = { "JSAPIWrapperObject", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; + +template<> const bool JSCallbackObject::needsDestruction = true; + +template <> +Structure* JSCallbackObject::createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) +{ + return Structure::create(vm, globalObject, proto, TypeInfo(ObjectType, StructureFlags), &s_info); +} + +JSAPIWrapperObject::JSAPIWrapperObject(VM& vm, Structure* structure) + : Base(vm, structure) + , m_wrappedObject(0) +{ +} + +void JSAPIWrapperObject::finishCreation(VM& vm) +{ + Base::finishCreation(vm); + WeakSet::allocate(this, jsAPIWrapperObjectHandleOwner(), 0); // Balanced in JSAPIWrapperObjectHandleOwner::finalize. +} + +void JSAPIWrapperObject::setWrappedObject(void* wrappedObject) +{ + ASSERT(!m_wrappedObject); + m_wrappedObject = [static_cast(wrappedObject) retain]; +} + +void JSAPIWrapperObject::visitChildren(JSCell* cell, JSC::SlotVisitor& visitor) +{ + JSAPIWrapperObject* thisObject = JSC::jsCast(cell); + Base::visitChildren(cell, visitor); + + void* wrappedObject = thisObject->wrappedObject(); + if (wrappedObject) + scanExternalObjectGraph(visitor.vm(), visitor, wrappedObject); +} + +} // namespace JSC + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/JSBase.cpp b/API/JSBase.cpp new file mode 100644 index 0000000..a3e2059 --- /dev/null +++ b/API/JSBase.cpp @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2006, 2007, 2013, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSBase.h" +#include "JSBasePrivate.h" + +#include "APICast.h" +#include "CallFrame.h" +#include "Completion.h" +#include "Exception.h" +#include "GCActivityCallback.h" +#include "InitializeThreading.h" +#include "JSGlobalObject.h" +#include "JSLock.h" +#include "JSObject.h" +#include "OpaqueJSString.h" +#include "JSCInlines.h" +#include "SourceCode.h" +#include + +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectInspectorController.h" +#endif + +using namespace JSC; + +JSValueRef JSEvaluateScript(JSContextRef ctx, JSStringRef script, JSObjectRef thisObject, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsThisObject = toJS(thisObject); + + startingLineNumber = std::max(1, startingLineNumber); + + // evaluate sets "this" to the global object if it is NULL + JSGlobalObject* globalObject = exec->vmEntryGlobalObject(); + SourceCode source = makeSource(script->string(), sourceURL ? sourceURL->string() : String(), TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber())); + + NakedPtr evaluationException; + JSValue returnValue = profiledEvaluate(globalObject->globalExec(), ProfilingReason::API, source, jsThisObject, evaluationException); + + if (evaluationException) { + if (exception) + *exception = toRef(exec, evaluationException->value()); +#if ENABLE(REMOTE_INSPECTOR) + // FIXME: If we have a debugger attached we could learn about ParseError exceptions through + // ScriptDebugServer::sourceParsed and this path could produce a duplicate warning. The + // Debugger path is currently ignored by inspector. + // NOTE: If we don't have a debugger, this SourceCode will be forever lost to the inspector. + // We could stash it in the inspector in case an inspector is ever opened. 
+ globalObject->inspectorController().reportAPIException(exec, evaluationException); +#endif + return 0; + } + + if (returnValue) + return toRef(exec, returnValue); + + // happens, for example, when the only statement is an empty (';') statement + return toRef(exec, jsUndefined()); +} + +bool JSCheckScriptSyntax(JSContextRef ctx, JSStringRef script, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + startingLineNumber = std::max(1, startingLineNumber); + + SourceCode source = makeSource(script->string(), sourceURL ? sourceURL->string() : String(), TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber())); + + JSValue syntaxException; + bool isValidSyntax = checkSyntax(exec->vmEntryGlobalObject()->globalExec(), source, &syntaxException); + + if (!isValidSyntax) { + if (exception) + *exception = toRef(exec, syntaxException); +#if ENABLE(REMOTE_INSPECTOR) + Exception* exception = Exception::create(exec->vm(), syntaxException); + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, exception); +#endif + return false; + } + + return true; +} + +void JSGarbageCollect(JSContextRef ctx) +{ + // We used to recommend passing NULL as an argument here, which caused the only heap to be collected. + // As there is no longer a shared heap, the previously recommended usage became a no-op (but the GC + // will happen when the context group is destroyed). + // Because the function argument was originally ignored, some clients may pass their released context here, + // in which case there is a risk of crashing if another thread performs GC on the same heap in between. + if (!ctx) + return; + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + exec->vm().heap.reportAbandonedObjectGraph(); +} + +void JSReportExtraMemoryCost(JSContextRef ctx, size_t size) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + exec->vm().heap.deprecatedReportExtraMemory(size); +} + +extern "C" JS_EXPORT void JSSynchronousGarbageCollectForDebugging(JSContextRef); +extern "C" JS_EXPORT void JSSynchronousEdenCollectForDebugging(JSContextRef); + +void JSSynchronousGarbageCollectForDebugging(JSContextRef ctx) +{ + if (!ctx) + return; + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + exec->vm().heap.collectAllGarbage(); +} + +void JSSynchronousEdenCollectForDebugging(JSContextRef ctx) +{ + if (!ctx) + return; + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + exec->vm().heap.collectSync(CollectionScope::Eden); +} + +void JSDisableGCTimer(void) +{ + GCActivityCallback::s_shouldCreateGCTimer = false; +} + +#if PLATFORM(IOS) +// FIXME: Expose symbols to tell dyld where to find JavaScriptCore on older versions of +// iOS (< 7.0). We should remove these symbols once we no longer need to support such +// versions of iOS. See for more details. 
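Note (not part of the patch): JSEvaluateScript, JSCheckScriptSyntax, and JSGarbageCollect implemented above make up the minimal embedding surface of JSBase.cpp. A sketch of how an embedder typically drives them; the wrapper name runIfWellFormed, the script, and the URL are illustrative:

// Sketch: exercising the JSBase.cpp entry points defined above.
#include <JavaScriptCore/JavaScriptCore.h>
#include <stdbool.h>

static bool runIfWellFormed(JSGlobalContextRef ctx, const char* source, const char* url)
{
    JSStringRef script = JSStringCreateWithUTF8CString(source);
    JSStringRef sourceURL = JSStringCreateWithUTF8CString(url);
    JSValueRef exception = NULL;

    // Starting line numbers below 1 are clamped to 1, matching the handling above.
    bool ok = JSCheckScriptSyntax(ctx, script, sourceURL, 1, &exception);
    if (ok)
        JSEvaluateScript(ctx, script, NULL /* "this" defaults to the global object */, sourceURL, 1, &exception);

    JSStringRelease(sourceURL);
    JSStringRelease(script);
    return ok && !exception;
}

int main(void)
{
    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);
    runIfWellFormed(ctx, "var blob = new Array(1024); blob.length", "embedder.js");

    // A hint only: collection is deferred (reportAbandonedObjectGraph above), not synchronous.
    JSGarbageCollect(ctx);
    JSGlobalContextRelease(ctx);
    return 0;
}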
+JS_EXPORT extern const char iosInstallName43 __asm("$ld$install_name$os4.3$/System/Library/PrivateFrameworks/JavaScriptCore.framework/JavaScriptCore"); +JS_EXPORT extern const char iosInstallName50 __asm("$ld$install_name$os5.0$/System/Library/PrivateFrameworks/JavaScriptCore.framework/JavaScriptCore"); +JS_EXPORT extern const char iosInstallName51 __asm("$ld$install_name$os5.1$/System/Library/PrivateFrameworks/JavaScriptCore.framework/JavaScriptCore"); +JS_EXPORT extern const char iosInstallName60 __asm("$ld$install_name$os6.0$/System/Library/PrivateFrameworks/JavaScriptCore.framework/JavaScriptCore"); +JS_EXPORT extern const char iosInstallName61 __asm("$ld$install_name$os6.1$/System/Library/PrivateFrameworks/JavaScriptCore.framework/JavaScriptCore"); + +const char iosInstallName43 = 0; +const char iosInstallName50 = 0; +const char iosInstallName51 = 0; +const char iosInstallName60 = 0; +const char iosInstallName61 = 0; +#endif diff --git a/API/JSBase.h b/API/JSBase.h new file mode 100644 index 0000000..bc85aca --- /dev/null +++ b/API/JSBase.h @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSBase_h +#define JSBase_h + +#ifndef __cplusplus +#include +#endif + +#ifdef __OBJC__ +#import +#endif + +/* JavaScript engine interface */ + +/*! @typedef JSContextGroupRef A group that associates JavaScript contexts with one another. Contexts in the same group may share and exchange JavaScript objects. */ +typedef const struct OpaqueJSContextGroup* JSContextGroupRef; + +/*! @typedef JSContextRef A JavaScript execution context. Holds the global object and other execution state. */ +typedef const struct OpaqueJSContext* JSContextRef; + +/*! @typedef JSGlobalContextRef A global JavaScript execution context. A JSGlobalContext is a JSContext. */ +typedef struct OpaqueJSContext* JSGlobalContextRef; + +/*! @typedef JSStringRef A UTF16 character buffer. The fundamental string representation in JavaScript. */ +typedef struct OpaqueJSString* JSStringRef; + +/*! @typedef JSClassRef A JavaScript class. Used with JSObjectMake to construct objects with custom behavior. */ +typedef struct OpaqueJSClass* JSClassRef; + +/*! @typedef JSPropertyNameArrayRef An array of JavaScript property names. 
*/ +typedef struct OpaqueJSPropertyNameArray* JSPropertyNameArrayRef; + +/*! @typedef JSPropertyNameAccumulatorRef An ordered set used to collect the names of a JavaScript object's properties. */ +typedef struct OpaqueJSPropertyNameAccumulator* JSPropertyNameAccumulatorRef; + +/*! @typedef JSTypedArrayBytesDeallocator A function used to deallocate bytes passed to a Typed Array constructor. The function should take two arguments. The first is a pointer to the bytes that were originally passed to the Typed Array constructor. The second is a pointer to additional information desired at the time the bytes are to be freed. */ +typedef void (*JSTypedArrayBytesDeallocator)(void* bytes, void* deallocatorContext); + +/* JavaScript data types */ + +/*! @typedef JSValueRef A JavaScript value. The base type for all JavaScript values, and polymorphic functions on them. */ +typedef const struct OpaqueJSValue* JSValueRef; + +/*! @typedef JSObjectRef A JavaScript object. A JSObject is a JSValue. */ +typedef struct OpaqueJSValue* JSObjectRef; + +/* JavaScript symbol exports */ +/* These rules should stay the same as in WebKit2/Shared/API/c/WKBase.h */ + +#undef JS_EXPORT +#if defined(JS_NO_EXPORT) +#define JS_EXPORT +#elif defined(__GNUC__) && !defined(__CC_ARM) && !defined(__ARMCC__) +#define JS_EXPORT __attribute__((visibility("default"))) +#elif defined(WIN32) || defined(_WIN32) || defined(_WIN32_WCE) || defined(__CC_ARM) || defined(__ARMCC__) +#if defined(BUILDING_JavaScriptCore) || defined(STATICALLY_LINKED_WITH_JavaScriptCore) +#define JS_EXPORT __declspec(dllexport) +#else +#define JS_EXPORT __declspec(dllimport) +#endif +#else /* !defined(JS_NO_EXPORT) */ +#define JS_EXPORT +#endif /* defined(JS_NO_EXPORT) */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Script Evaluation */ + +/*! +@function JSEvaluateScript +@abstract Evaluates a string of JavaScript. +@param ctx The execution context to use. +@param script A JSString containing the script to evaluate. +@param thisObject The object to use as "this," or NULL to use the global object as "this." +@param sourceURL A JSString containing a URL for the script's source file. This is used by debuggers and when reporting exceptions. Pass NULL if you do not care to include source file information. +@param startingLineNumber An integer value specifying the script's starting line number in the file located at sourceURL. This is only used when reporting exceptions. The value is one-based, so the first line is line 1 and invalid values are clamped to 1. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result The JSValue that results from evaluating script, or NULL if an exception is thrown. +*/ +JS_EXPORT JSValueRef JSEvaluateScript(JSContextRef ctx, JSStringRef script, JSObjectRef thisObject, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception); + +/*! +@function JSCheckScriptSyntax +@abstract Checks for syntax errors in a string of JavaScript. +@param ctx The execution context to use. +@param script A JSString containing the script to check for syntax errors. +@param sourceURL A JSString containing a URL for the script's source file. This is only used when reporting exceptions. Pass NULL if you do not care to include source file information in exceptions. +@param startingLineNumber An integer value specifying the script's starting line number in the file located at sourceURL. This is only used when reporting exceptions. 
The value is one-based, so the first line is line 1 and invalid values are clamped to 1. +@param exception A pointer to a JSValueRef in which to store a syntax error exception, if any. Pass NULL if you do not care to store a syntax error exception. +@result true if the script is syntactically correct, otherwise false. +*/ +JS_EXPORT bool JSCheckScriptSyntax(JSContextRef ctx, JSStringRef script, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception); + +/*! +@function JSGarbageCollect +@abstract Performs a JavaScript garbage collection. +@param ctx The execution context to use. +@discussion JavaScript values that are on the machine stack, in a register, + protected by JSValueProtect, set as the global object of an execution context, + or reachable from any such value will not be collected. + + During JavaScript execution, you are not required to call this function; the + JavaScript engine will garbage collect as needed. JavaScript values created + within a context group are automatically destroyed when the last reference + to the context group is released. +*/ +JS_EXPORT void JSGarbageCollect(JSContextRef ctx); + +#ifdef __cplusplus +} +#endif + +/* Enable the Objective-C API for platforms with a modern runtime. */ +#if !defined(JSC_OBJC_API_ENABLED) +#define JSC_OBJC_API_ENABLED (defined(__clang__) && defined(__APPLE__) && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && !defined(__i386__)) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE))) +#endif + +#endif /* JSBase_h */ diff --git a/API/JSBasePrivate.h b/API/JSBasePrivate.h new file mode 100644 index 0000000..1375949 --- /dev/null +++ b/API/JSBasePrivate.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSBasePrivate_h +#define JSBasePrivate_h + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! +@function +@abstract Reports an object's non-GC memory payload to the garbage collector. +@param ctx The execution context to use. +@param size The payload's size, in bytes. +@discussion Use this function to notify the garbage collector that a GC object +owns a large non-GC memory region. 
Calling this function will encourage the +garbage collector to collect soon, hoping to reclaim that large non-GC memory +region. +*/ +JS_EXPORT void JSReportExtraMemoryCost(JSContextRef ctx, size_t size) CF_AVAILABLE(10_6, 7_0); + +JS_EXPORT void JSDisableGCTimer(void); + +#ifdef __cplusplus +} +#endif + +#endif /* JSBasePrivate_h */ diff --git a/API/JSCTestRunnerUtils.cpp b/API/JSCTestRunnerUtils.cpp new file mode 100644 index 0000000..d314c5d --- /dev/null +++ b/API/JSCTestRunnerUtils.cpp @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSCTestRunnerUtils.h" + +#include "APICast.h" +#include "JSCInlines.h" +#include "TestRunnerUtils.h" + +namespace JSC { + + +JSValueRef failNextNewCodeBlock(JSContextRef context) +{ + ExecState* exec= toJS(context); + JSLockHolder holder(exec); + return toRef(exec, failNextNewCodeBlock(exec)); +} + +JSValueRef numberOfDFGCompiles(JSContextRef context, JSValueRef theFunctionValueRef) +{ + ExecState* exec= toJS(context); + JSLockHolder holder(exec); + return toRef(exec, numberOfDFGCompiles(toJS(exec, theFunctionValueRef))); +} + +JSValueRef setNeverInline(JSContextRef context, JSValueRef theFunctionValueRef) +{ + ExecState* exec= toJS(context); + JSLockHolder holder(exec); + return toRef(exec, setNeverInline(toJS(exec, theFunctionValueRef))); +} + +JSValueRef setNeverOptimize(JSContextRef context, JSValueRef theFunctionValueRef) +{ + ExecState* exec= toJS(context); + JSLockHolder holder(exec); + return toRef(exec, setNeverOptimize(toJS(exec, theFunctionValueRef))); +} + +} // namespace JSC + diff --git a/API/JSCTestRunnerUtils.h b/API/JSCTestRunnerUtils.h new file mode 100644 index 0000000..c52da52 --- /dev/null +++ b/API/JSCTestRunnerUtils.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSCTestRunnerUtils_h +#define JSCTestRunnerUtils_h + +#include +#include + +namespace JSC { + +JS_EXPORT_PRIVATE JSValueRef failNextNewCodeBlock(JSContextRef); +JS_EXPORT_PRIVATE JSValueRef numberOfDFGCompiles(JSContextRef, JSValueRef theFunction); +JS_EXPORT_PRIVATE JSValueRef setNeverInline(JSContextRef, JSValueRef theFunction); +JS_EXPORT_PRIVATE JSValueRef setNeverOptimize(JSContextRef, JSValueRef theFunction); + +} // namespace JSC + +#endif // JSCTestRunnerUtils_h diff --git a/API/JSCallbackConstructor.cpp b/API/JSCallbackConstructor.cpp new file mode 100644 index 0000000..5597c3e --- /dev/null +++ b/API/JSCallbackConstructor.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
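Note (not part of the patch): JSReportExtraMemoryCost, declared in JSBasePrivate.h earlier in this patch and implemented in JSBase.cpp via deprecatedReportExtraMemory, lets a client credit a large malloc'd payload owned by a GC object against the collector's budget. A hypothetical use, assuming the private header is available to the embedder and using an illustrative "Payload" class and 16MB buffer:

// Sketch: pairing an out-of-heap allocation with JSReportExtraMemoryCost,
// as described in JSBasePrivate.h above. All names are illustrative.
#include <JavaScriptCore/JavaScriptCore.h>
#include <JavaScriptCore/JSBasePrivate.h> // private header; assumed present
#include <stdlib.h>

enum { kPayloadSize = 16 * 1024 * 1024 };

static void payloadFinalize(JSObjectRef object)
{
    free(JSObjectGetPrivate(object)); // release the non-GC payload with its wrapper
}

int main(void)
{
    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);

    JSClassDefinition definition = kJSClassDefinitionEmpty;
    definition.className = "Payload";
    definition.finalize = payloadFinalize;
    JSClassRef payloadClass = JSClassCreate(&definition);

    JSObjectRef wrapper = JSObjectMake(ctx, payloadClass, malloc(kPayloadSize));
    (void)wrapper;
    // Tell the collector this GC cell keeps 16MB alive outside the heap, so it
    // schedules collection sooner than the cell size alone would suggest.
    JSReportExtraMemoryCost(ctx, kPayloadSize);

    JSClassRelease(payloadClass);
    JSGlobalContextRelease(ctx);
    return 0;
}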
+ */ + +#include "config.h" +#include "JSCallbackConstructor.h" + +#include "APICallbackFunction.h" +#include "APICast.h" +#include "Error.h" +#include "JSGlobalObject.h" +#include "JSLock.h" +#include "ObjectPrototype.h" +#include "JSCInlines.h" + +namespace JSC { + +const ClassInfo JSCallbackConstructor::s_info = { "CallbackConstructor", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackConstructor) }; + +JSCallbackConstructor::JSCallbackConstructor(JSGlobalObject* globalObject, Structure* structure, JSClassRef jsClass, JSObjectCallAsConstructorCallback callback) + : JSDestructibleObject(globalObject->vm(), structure) + , m_class(jsClass) + , m_callback(callback) +{ +} + +void JSCallbackConstructor::finishCreation(JSGlobalObject* globalObject, JSClassRef jsClass) +{ + Base::finishCreation(globalObject->vm()); + ASSERT(inherits(info())); + if (m_class) + JSClassRetain(jsClass); +} + +JSCallbackConstructor::~JSCallbackConstructor() +{ + if (m_class) + JSClassRelease(m_class); +} + +void JSCallbackConstructor::destroy(JSCell* cell) +{ + static_cast(cell)->JSCallbackConstructor::~JSCallbackConstructor(); +} + +ConstructType JSCallbackConstructor::getConstructData(JSCell*, ConstructData& constructData) +{ + constructData.native.function = APICallbackFunction::construct; + return ConstructType::Host; +} + +} // namespace JSC diff --git a/API/JSCallbackConstructor.h b/API/JSCallbackConstructor.h new file mode 100644 index 0000000..d730ad7 --- /dev/null +++ b/API/JSCallbackConstructor.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
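Note (not part of the patch): JSCallbackConstructor above is the object the C API hands back for a class's construct callback; APICallbackFunction::construct, earlier in this patch, invokes the stored callback and throws a TypeError if it returns null. A caller-side sketch of that contract, with illustrative names (Point, pointConstruct):

// Sketch: a callAsConstructor callback routed through JSCallbackConstructor
// and APICallbackFunction::construct.
#include <JavaScriptCore/JavaScriptCore.h>
#include <stdlib.h>

typedef struct { double x, y; } Point;

static JSClassRef pointClass; // created once in main(); used by the constructor callback

static void pointFinalize(JSObjectRef object)
{
    free(JSObjectGetPrivate(object));
}

static JSObjectRef pointConstruct(JSContextRef ctx, JSObjectRef constructor,
                                  size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
{
    (void)constructor;
    Point* point = (Point*)calloc(1, sizeof(Point));
    if (argumentCount > 0)
        point->x = JSValueToNumber(ctx, arguments[0], exception);
    if (argumentCount > 1)
        point->y = JSValueToNumber(ctx, arguments[1], exception);
    // construct() above rejects a null result with a TypeError, so always
    // hand back a real object.
    return JSObjectMake(ctx, pointClass, point);
}

int main(void)
{
    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);

    JSClassDefinition definition = kJSClassDefinitionEmpty;
    definition.className = "Point";
    definition.finalize = pointFinalize;
    pointClass = JSClassCreate(&definition);

    JSObjectRef ctor = JSObjectMakeConstructor(ctx, pointClass, pointConstruct); // backed by JSCallbackConstructor
    JSStringRef name = JSStringCreateWithUTF8CString("Point");
    JSObjectSetProperty(ctx, JSContextGetGlobalObject(ctx), name, ctor, kJSPropertyAttributeNone, NULL);
    JSStringRelease(name);

    JSStringRef script = JSStringCreateWithUTF8CString("new Point(3, 4)");
    JSEvaluateScript(ctx, script, NULL, NULL, 1, NULL);
    JSStringRelease(script);

    JSClassRelease(pointClass);
    JSGlobalContextRelease(ctx);
    return 0;
}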
+ */ + +#ifndef JSCallbackConstructor_h +#define JSCallbackConstructor_h + +#include "JSObjectRef.h" +#include "runtime/JSDestructibleObject.h" + +namespace JSC { + +class JSCallbackConstructor : public JSDestructibleObject { +public: + typedef JSDestructibleObject Base; + static const unsigned StructureFlags = Base::StructureFlags | ImplementsHasInstance | ImplementsDefaultHasInstance; + + static JSCallbackConstructor* create(ExecState* exec, JSGlobalObject* globalObject, Structure* structure, JSClassRef classRef, JSObjectCallAsConstructorCallback callback) + { + JSCallbackConstructor* constructor = new (NotNull, allocateCell(*exec->heap())) JSCallbackConstructor(globalObject, structure, classRef, callback); + constructor->finishCreation(globalObject, classRef); + return constructor; + } + + ~JSCallbackConstructor(); + static void destroy(JSCell*); + JSClassRef classRef() const { return m_class; } + JSObjectCallAsConstructorCallback callback() const { return m_callback; } + DECLARE_INFO; + + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(ObjectType, StructureFlags), info()); + } + +protected: + JSCallbackConstructor(JSGlobalObject*, Structure*, JSClassRef, JSObjectCallAsConstructorCallback); + void finishCreation(JSGlobalObject*, JSClassRef); + +private: + friend struct APICallbackFunction; + + static ConstructType getConstructData(JSCell*, ConstructData&); + + JSObjectCallAsConstructorCallback constructCallback() { return m_callback; } + + JSClassRef m_class; + JSObjectCallAsConstructorCallback m_callback; +}; + +} // namespace JSC + +#endif // JSCallbackConstructor_h diff --git a/API/JSCallbackFunction.cpp b/API/JSCallbackFunction.cpp new file mode 100644 index 0000000..57333f5 --- /dev/null +++ b/API/JSCallbackFunction.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2006, 2008, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "JSCallbackFunction.h" + +#include "APICallbackFunction.h" +#include "APICast.h" +#include "CodeBlock.h" +#include "Error.h" +#include "ExceptionHelpers.h" +#include "FunctionPrototype.h" +#include "JSFunction.h" +#include "JSGlobalObject.h" +#include "JSLock.h" +#include "JSCInlines.h" + +namespace JSC { + +STATIC_ASSERT_IS_TRIVIALLY_DESTRUCTIBLE(JSCallbackFunction); + +const ClassInfo JSCallbackFunction::s_info = { "CallbackFunction", &InternalFunction::s_info, 0, CREATE_METHOD_TABLE(JSCallbackFunction) }; + +JSCallbackFunction::JSCallbackFunction(VM& vm, Structure* structure, JSObjectCallAsFunctionCallback callback) + : InternalFunction(vm, structure) + , m_callback(callback) +{ +} + +void JSCallbackFunction::finishCreation(VM& vm, const String& name) +{ + Base::finishCreation(vm, name); + ASSERT(inherits(info())); +} + +JSCallbackFunction* JSCallbackFunction::create(VM& vm, JSGlobalObject* globalObject, JSObjectCallAsFunctionCallback callback, const String& name) +{ + Structure* structure = globalObject->callbackFunctionStructure(); + JSCallbackFunction* function = new (NotNull, allocateCell(vm.heap)) JSCallbackFunction(vm, structure, callback); + function->finishCreation(vm, name); + return function; +} + +CallType JSCallbackFunction::getCallData(JSCell*, CallData& callData) +{ + callData.native.function = APICallbackFunction::call; + return CallType::Host; +} + +} // namespace JSC diff --git a/API/JSCallbackFunction.h b/API/JSCallbackFunction.h new file mode 100644 index 0000000..a4fdd06 --- /dev/null +++ b/API/JSCallbackFunction.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JSCallbackFunction_h +#define JSCallbackFunction_h + +#include "InternalFunction.h" +#include "JSObjectRef.h" + +namespace JSC { + +class JSCallbackFunction : public InternalFunction { + friend struct APICallbackFunction; +public: + typedef InternalFunction Base; + + static JSCallbackFunction* create(VM&, JSGlobalObject*, JSObjectCallAsFunctionCallback, const String& name); + + DECLARE_INFO; + + // InternalFunction mish-mashes constructor and function behavior -- we should + // refactor the code so this override isn't necessary + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(ObjectType, StructureFlags), info()); + } + +private: + JSCallbackFunction(VM&, Structure*, JSObjectCallAsFunctionCallback); + void finishCreation(VM&, const String& name); + + static CallType getCallData(JSCell*, CallData&); + + JSObjectCallAsFunctionCallback functionCallback() { return m_callback; } + + JSObjectCallAsFunctionCallback m_callback; +}; + +} // namespace JSC + +#endif // JSCallbackFunction_h diff --git a/API/JSCallbackObject.cpp b/API/JSCallbackObject.cpp new file mode 100644 index 0000000..02b38fd --- /dev/null +++ b/API/JSCallbackObject.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * Copyright (C) 2007 Eric Seidel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSCallbackObject.h" + +#include "Heap.h" +#include "JSCInlines.h" +#include + +namespace JSC { + +// Define the two types of JSCallbackObjects we support. 
+template <> const ClassInfo JSCallbackObject::s_info = { "CallbackObject", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; +template <> const ClassInfo JSCallbackObject::s_info = { "CallbackGlobalObject", &Base::s_info, 0, CREATE_METHOD_TABLE(JSCallbackObject) }; + +template<> const bool JSCallbackObject::needsDestruction = true; +template<> const bool JSCallbackObject::needsDestruction = false; + +template<> +JSCallbackObject* JSCallbackObject::create(VM& vm, JSClassRef classRef, Structure* structure) +{ + JSCallbackObject* callbackObject = new (NotNull, allocateCell>(vm.heap)) JSCallbackObject(vm, classRef, structure); + callbackObject->finishCreation(vm); + vm.heap.addFinalizer(callbackObject, destroy); + return callbackObject; +} + +template <> +Structure* JSCallbackObject::createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) +{ + return Structure::create(vm, globalObject, proto, TypeInfo(ObjectType, StructureFlags), info()); +} + +template <> +Structure* JSCallbackObject::createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) +{ + return Structure::create(vm, globalObject, proto, TypeInfo(GlobalObjectType, StructureFlags), info()); +} + +} // namespace JSC diff --git a/API/JSCallbackObject.h b/API/JSCallbackObject.h new file mode 100644 index 0000000..2525982 --- /dev/null +++ b/API/JSCallbackObject.h @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2006-2016 Apple Inc. All rights reserved. + * Copyright (C) 2007 Eric Seidel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
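Note (not part of the patch): the two explicit instantiations above cover ordinary API objects (ObjectType structure) and global objects (GlobalObjectType structure). The global-object flavor is what backs a context created with a custom global class, sketched below with illustrative names (EmbedderGlobal, apiVersion):

// Sketch: a context whose global object is the global-object variant of
// JSCallbackObject, exposing a static value to script.
#include <JavaScriptCore/JavaScriptCore.h>

static JSValueRef versionGetter(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
{
    (void)object; (void)propertyName; (void)exception;
    return JSValueMakeNumber(ctx, 2.0);
}

int main(void)
{
    JSStaticValue staticValues[] = {
        { "apiVersion", versionGetter, NULL, kJSPropertyAttributeReadOnly },
        { NULL, NULL, NULL, 0 }
    };

    JSClassDefinition definition = kJSClassDefinitionEmpty;
    definition.className = "EmbedderGlobal";
    definition.staticValues = staticValues;
    JSClassRef globalClass = JSClassCreate(&definition);

    // The global object of this context is a callback object built from globalClass.
    JSGlobalContextRef ctx = JSGlobalContextCreate(globalClass);

    JSStringRef script = JSStringCreateWithUTF8CString("apiVersion + 1"); // resolves against the custom global
    JSValueRef result = JSEvaluateScript(ctx, script, NULL, NULL, 1, NULL);
    // JSValueToNumber(ctx, result, NULL) evaluates to 3 here.
    (void)result;

    JSStringRelease(script);
    JSClassRelease(globalClass);
    JSGlobalContextRelease(ctx);
    return 0;
}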
+ */ + +#ifndef JSCallbackObject_h +#define JSCallbackObject_h + +#include "JSObjectRef.h" +#include "JSValueRef.h" +#include "JSObject.h" + +namespace JSC { + +struct JSCallbackObjectData { + WTF_MAKE_FAST_ALLOCATED; +public: + JSCallbackObjectData(void* privateData, JSClassRef jsClass) + : privateData(privateData) + , jsClass(jsClass) + { + JSClassRetain(jsClass); + } + + ~JSCallbackObjectData() + { + JSClassRelease(jsClass); + } + + JSValue getPrivateProperty(const Identifier& propertyName) const + { + if (!m_privateProperties) + return JSValue(); + return m_privateProperties->getPrivateProperty(propertyName); + } + + void setPrivateProperty(VM& vm, JSCell* owner, const Identifier& propertyName, JSValue value) + { + if (!m_privateProperties) + m_privateProperties = std::make_unique(); + m_privateProperties->setPrivateProperty(vm, owner, propertyName, value); + } + + void deletePrivateProperty(const Identifier& propertyName) + { + if (!m_privateProperties) + return; + m_privateProperties->deletePrivateProperty(propertyName); + } + + void visitChildren(SlotVisitor& visitor) + { + JSPrivatePropertyMap* properties = m_privateProperties.get(); + if (!properties) + return; + properties->visitChildren(visitor); + } + + void* privateData; + JSClassRef jsClass; + struct JSPrivatePropertyMap { + WTF_MAKE_FAST_ALLOCATED; + public: + JSValue getPrivateProperty(const Identifier& propertyName) const + { + PrivatePropertyMap::const_iterator location = m_propertyMap.find(propertyName.impl()); + if (location == m_propertyMap.end()) + return JSValue(); + return location->value.get(); + } + + void setPrivateProperty(VM& vm, JSCell* owner, const Identifier& propertyName, JSValue value) + { + LockHolder locker(m_lock); + WriteBarrier empty; + m_propertyMap.add(propertyName.impl(), empty).iterator->value.set(vm, owner, value); + } + + void deletePrivateProperty(const Identifier& propertyName) + { + LockHolder locker(m_lock); + m_propertyMap.remove(propertyName.impl()); + } + + void visitChildren(SlotVisitor& visitor) + { + LockHolder locker(m_lock); + for (PrivatePropertyMap::iterator ptr = m_propertyMap.begin(); ptr != m_propertyMap.end(); ++ptr) { + if (ptr->value) + visitor.append(ptr->value); + } + } + + private: + typedef HashMap, WriteBarrier, IdentifierRepHash> PrivatePropertyMap; + PrivatePropertyMap m_propertyMap; + Lock m_lock; + }; + std::unique_ptr m_privateProperties; +}; + + +template +class JSCallbackObject : public Parent { +protected: + JSCallbackObject(ExecState*, Structure*, JSClassRef, void* data); + JSCallbackObject(VM&, JSClassRef, Structure*); + + void finishCreation(ExecState*); + void finishCreation(VM&); + +public: + typedef Parent Base; + static const unsigned StructureFlags = Base::StructureFlags | ProhibitsPropertyCaching | OverridesGetOwnPropertySlot | InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero | ImplementsHasInstance | OverridesGetPropertyNames | TypeOfShouldCallGetCallData; + + ~JSCallbackObject(); + + static JSCallbackObject* create(ExecState* exec, JSGlobalObject* globalObject, Structure* structure, JSClassRef classRef, void* data) + { + ASSERT_UNUSED(globalObject, !structure->globalObject() || structure->globalObject() == globalObject); + JSCallbackObject* callbackObject = new (NotNull, allocateCell(*exec->heap())) JSCallbackObject(exec, structure, classRef, data); + callbackObject->finishCreation(exec); + return callbackObject; + } + static JSCallbackObject* create(VM&, JSClassRef, Structure*); + + static const bool needsDestruction; + static void 
destroy(JSCell* cell) + { + static_cast(cell)->JSCallbackObject::~JSCallbackObject(); + } + + void setPrivate(void* data); + void* getPrivate(); + + // FIXME: We should fix the warnings for extern-template in JSObject template classes: https://bugs.webkit.org/show_bug.cgi?id=161979 +#if COMPILER(CLANG) +#if __has_warning("-Wundefined-var-template") +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wundefined-var-template" +#endif +#endif + DECLARE_INFO; +#if COMPILER(CLANG) +#if __has_warning("-Wundefined-var-template") +#pragma clang diagnostic pop +#endif +#endif + + JSClassRef classRef() const { return m_callbackObjectData->jsClass; } + bool inherits(JSClassRef) const; + + static Structure* createStructure(VM&, JSGlobalObject*, JSValue); + + JSValue getPrivateProperty(const Identifier& propertyName) const + { + return m_callbackObjectData->getPrivateProperty(propertyName); + } + + void setPrivateProperty(VM& vm, const Identifier& propertyName, JSValue value) + { + m_callbackObjectData->setPrivateProperty(vm, this, propertyName, value); + } + + void deletePrivateProperty(const Identifier& propertyName) + { + m_callbackObjectData->deletePrivateProperty(propertyName); + } + + using Parent::methodTable; + +private: + static String className(const JSObject*); + + static JSValue defaultValue(const JSObject*, ExecState*, PreferredPrimitiveType); + + static bool getOwnPropertySlot(JSObject*, ExecState*, PropertyName, PropertySlot&); + static bool getOwnPropertySlotByIndex(JSObject*, ExecState*, unsigned propertyName, PropertySlot&); + + static bool put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); + static bool putByIndex(JSCell*, ExecState*, unsigned, JSValue, bool shouldThrow); + + static bool deleteProperty(JSCell*, ExecState*, PropertyName); + static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned); + + static bool customHasInstance(JSObject*, ExecState*, JSValue); + + static void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + + static ConstructType getConstructData(JSCell*, ConstructData&); + static CallType getCallData(JSCell*, CallData&); + + static void visitChildren(JSCell* cell, SlotVisitor& visitor) + { + JSCallbackObject* thisObject = jsCast(cell); + ASSERT_GC_OBJECT_INHERITS((static_cast(thisObject)), JSCallbackObject::info()); + Parent::visitChildren(thisObject, visitor); + thisObject->m_callbackObjectData->visitChildren(visitor); + } + + void init(ExecState*); + + static JSCallbackObject* asCallbackObject(JSValue); + static JSCallbackObject* asCallbackObject(EncodedJSValue); + + static EncodedJSValue JSC_HOST_CALL call(ExecState*); + static EncodedJSValue JSC_HOST_CALL construct(ExecState*); + + JSValue getStaticValue(ExecState*, PropertyName); + static EncodedJSValue staticFunctionGetter(ExecState*, EncodedJSValue, PropertyName); + static EncodedJSValue callbackGetter(ExecState*, EncodedJSValue, PropertyName); + + std::unique_ptr m_callbackObjectData; + const ClassInfo* m_classInfo; +}; + +} // namespace JSC + +// include the actual template class implementation +#include "JSCallbackObjectFunctions.h" + +#endif // JSCallbackObject_h diff --git a/API/JSCallbackObjectFunctions.h b/API/JSCallbackObjectFunctions.h new file mode 100644 index 0000000..a525f5b --- /dev/null +++ b/API/JSCallbackObjectFunctions.h @@ -0,0 +1,701 @@ +/* + * Copyright (C) 2006, 2008, 2016 Apple Inc. All rights reserved. 
+ * Copyright (C) 2007 Eric Seidel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "APICast.h" +#include "Error.h" +#include "ExceptionHelpers.h" +#include "JSCallbackFunction.h" +#include "JSClassRef.h" +#include "JSFunction.h" +#include "JSGlobalObject.h" +#include "JSLock.h" +#include "JSObjectRef.h" +#include "JSString.h" +#include "JSStringRef.h" +#include "OpaqueJSString.h" +#include "PropertyNameArray.h" +#include + +namespace JSC { + +template +inline JSCallbackObject* JSCallbackObject::asCallbackObject(JSValue value) +{ + ASSERT(asObject(value)->inherits(info())); + return jsCast(asObject(value)); +} + +template +inline JSCallbackObject* JSCallbackObject::asCallbackObject(EncodedJSValue value) +{ + ASSERT(asObject(JSValue::decode(value))->inherits(info())); + return jsCast(asObject(JSValue::decode(value))); +} + +template +JSCallbackObject::JSCallbackObject(ExecState* exec, Structure* structure, JSClassRef jsClass, void* data) + : Parent(exec->vm(), structure) + , m_callbackObjectData(std::make_unique(data, jsClass)) +{ +} + +// Global object constructor. +// FIXME: Move this into a separate JSGlobalCallbackObject class derived from this one. +template +JSCallbackObject::JSCallbackObject(VM& vm, JSClassRef jsClass, Structure* structure) + : Parent(vm, structure) + , m_callbackObjectData(std::make_unique(nullptr, jsClass)) +{ +} + +template +JSCallbackObject::~JSCallbackObject() +{ + VM* vm = this->HeapCell::vm(); + vm->currentlyDestructingCallbackObject = this; + ASSERT(m_classInfo); + vm->currentlyDestructingCallbackObjectClassInfo = m_classInfo; + JSObjectRef thisRef = toRef(static_cast(this)); + for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectFinalizeCallback finalize = jsClass->finalize) + finalize(thisRef); + } + vm->currentlyDestructingCallbackObject = nullptr; + vm->currentlyDestructingCallbackObjectClassInfo = nullptr; +} + +template +void JSCallbackObject::finishCreation(ExecState* exec) +{ + Base::finishCreation(exec->vm()); + ASSERT(Parent::inherits(info())); + init(exec); +} + +// This is just for Global object, so we can assume that Base::finishCreation is JSGlobalObject::finishCreation. 
+template +void JSCallbackObject::finishCreation(VM& vm) +{ + ASSERT(Parent::inherits(info())); + ASSERT(Parent::isGlobalObject()); + Base::finishCreation(vm); + init(jsCast(this)->globalExec()); +} + +template +void JSCallbackObject::init(ExecState* exec) +{ + ASSERT(exec); + + Vector initRoutines; + JSClassRef jsClass = classRef(); + do { + if (JSObjectInitializeCallback initialize = jsClass->initialize) + initRoutines.append(initialize); + } while ((jsClass = jsClass->parentClass)); + + // initialize from base to derived + for (int i = static_cast(initRoutines.size()) - 1; i >= 0; i--) { + JSLock::DropAllLocks dropAllLocks(exec); + JSObjectInitializeCallback initialize = initRoutines[i]; + initialize(toRef(exec), toRef(this)); + } + + m_classInfo = this->classInfo(); +} + +template +String JSCallbackObject::className(const JSObject* object) +{ + const JSCallbackObject* thisObject = jsCast(object); + String thisClassName = thisObject->classRef()->className(); + if (!thisClassName.isEmpty()) + return thisClassName; + + return Parent::className(object); +} + +template +bool JSCallbackObject::getOwnPropertySlot(JSObject* object, ExecState* exec, PropertyName propertyName, PropertySlot& slot) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObject = jsCast(object); + JSContextRef ctx = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + RefPtr propertyNameRef; + + if (StringImpl* name = propertyName.uid()) { + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + // optional optimization to bypass getProperty in cases when we only need to know if the property exists + if (JSObjectHasPropertyCallback hasProperty = jsClass->hasProperty) { + if (!propertyNameRef) + propertyNameRef = OpaqueJSString::create(name); + JSLock::DropAllLocks dropAllLocks(exec); + if (hasProperty(ctx, thisRef, propertyNameRef.get())) { + slot.setCustom(thisObject, ReadOnly | DontEnum, callbackGetter); + return true; + } + } else if (JSObjectGetPropertyCallback getProperty = jsClass->getProperty) { + if (!propertyNameRef) + propertyNameRef = OpaqueJSString::create(name); + JSValueRef exception = 0; + JSValueRef value; + { + JSLock::DropAllLocks dropAllLocks(exec); + value = getProperty(ctx, thisRef, propertyNameRef.get(), &exception); + } + if (exception) { + throwException(exec, scope, toJS(exec, exception)); + slot.setValue(thisObject, ReadOnly | DontEnum, jsUndefined()); + return true; + } + if (value) { + slot.setValue(thisObject, ReadOnly | DontEnum, toJS(exec, value)); + return true; + } + } + + if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { + if (staticValues->contains(name)) { + JSValue value = thisObject->getStaticValue(exec, propertyName); + if (value) { + slot.setValue(thisObject, ReadOnly | DontEnum, value); + return true; + } + } + } + + if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { + if (staticFunctions->contains(name)) { + slot.setCustom(thisObject, ReadOnly | DontEnum, staticFunctionGetter); + return true; + } + } + } + } + + return Parent::getOwnPropertySlot(thisObject, exec, propertyName, slot); +} + +template +bool JSCallbackObject::getOwnPropertySlotByIndex(JSObject* object, ExecState* exec, unsigned propertyName, PropertySlot& slot) +{ + return object->methodTable()->getOwnPropertySlot(object, exec, Identifier::from(exec, propertyName), slot); +} + +template +JSValue JSCallbackObject::defaultValue(const JSObject* object, ExecState* 
exec, PreferredPrimitiveType hint) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + const JSCallbackObject* thisObject = jsCast(object); + JSContextRef ctx = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + ::JSType jsHint = hint == PreferString ? kJSTypeString : kJSTypeNumber; + + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectConvertToTypeCallback convertToType = jsClass->convertToType) { + JSValueRef exception = 0; + JSValueRef result = convertToType(ctx, thisRef, jsHint, &exception); + if (exception) { + throwException(exec, scope, toJS(exec, exception)); + return jsUndefined(); + } + if (result) + return toJS(exec, result); + } + } + + return Parent::defaultValue(object, exec, hint); +} + +template +bool JSCallbackObject::put(JSCell* cell, ExecState* exec, PropertyName propertyName, JSValue value, PutPropertySlot& slot) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObject = jsCast(cell); + JSContextRef ctx = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + RefPtr propertyNameRef; + JSValueRef valueRef = toRef(exec, value); + + if (StringImpl* name = propertyName.uid()) { + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectSetPropertyCallback setProperty = jsClass->setProperty) { + if (!propertyNameRef) + propertyNameRef = OpaqueJSString::create(name); + JSValueRef exception = 0; + bool result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, &exception); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + if (result || exception) + return result; + } + + if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { + if (StaticValueEntry* entry = staticValues->get(name)) { + if (entry->attributes & kJSPropertyAttributeReadOnly) + return false; + if (JSObjectSetPropertyCallback setProperty = entry->setProperty) { + JSValueRef exception = 0; + bool result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = setProperty(ctx, thisRef, entry->propertyNameRef.get(), valueRef, &exception); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + if (result || exception) + return result; + } + } + } + + if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { + if (StaticFunctionEntry* entry = staticFunctions->get(name)) { + PropertySlot getSlot(thisObject, PropertySlot::InternalMethodType::VMInquiry); + if (Parent::getOwnPropertySlot(thisObject, exec, propertyName, getSlot)) + return Parent::put(thisObject, exec, propertyName, value, slot); + if (entry->attributes & kJSPropertyAttributeReadOnly) + return false; + return thisObject->JSCallbackObject::putDirect(vm, propertyName, value); // put as override property + } + } + } + } + + return Parent::put(thisObject, exec, propertyName, value, slot); +} + +template +bool JSCallbackObject::putByIndex(JSCell* cell, ExecState* exec, unsigned propertyIndex, JSValue value, bool shouldThrow) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObject = jsCast(cell); + JSContextRef ctx = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + RefPtr propertyNameRef; + JSValueRef valueRef = toRef(exec, value); + Identifier propertyName = Identifier::from(exec, propertyIndex); + + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass 
= jsClass->parentClass) { + if (JSObjectSetPropertyCallback setProperty = jsClass->setProperty) { + if (!propertyNameRef) + propertyNameRef = OpaqueJSString::create(propertyName.impl()); + JSValueRef exception = 0; + bool result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, &exception); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + if (result || exception) + return result; + } + + if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { + if (StaticValueEntry* entry = staticValues->get(propertyName.impl())) { + if (entry->attributes & kJSPropertyAttributeReadOnly) + return false; + if (JSObjectSetPropertyCallback setProperty = entry->setProperty) { + JSValueRef exception = 0; + bool result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = setProperty(ctx, thisRef, entry->propertyNameRef.get(), valueRef, &exception); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + if (result || exception) + return result; + } + } + } + + if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { + if (StaticFunctionEntry* entry = staticFunctions->get(propertyName.impl())) { + if (entry->attributes & kJSPropertyAttributeReadOnly) + return false; + break; + } + } + } + + return Parent::putByIndex(thisObject, exec, propertyIndex, value, shouldThrow); +} + +template +bool JSCallbackObject::deleteProperty(JSCell* cell, ExecState* exec, PropertyName propertyName) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObject = jsCast(cell); + JSContextRef ctx = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + RefPtr propertyNameRef; + + if (StringImpl* name = propertyName.uid()) { + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectDeletePropertyCallback deleteProperty = jsClass->deleteProperty) { + if (!propertyNameRef) + propertyNameRef = OpaqueJSString::create(name); + JSValueRef exception = 0; + bool result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = deleteProperty(ctx, thisRef, propertyNameRef.get(), &exception); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + if (result || exception) + return true; + } + + if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { + if (StaticValueEntry* entry = staticValues->get(name)) { + if (entry->attributes & kJSPropertyAttributeDontDelete) + return false; + return true; + } + } + + if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { + if (StaticFunctionEntry* entry = staticFunctions->get(name)) { + if (entry->attributes & kJSPropertyAttributeDontDelete) + return false; + return true; + } + } + } + } + + return Parent::deleteProperty(thisObject, exec, propertyName); +} + +template +bool JSCallbackObject::deletePropertyByIndex(JSCell* cell, ExecState* exec, unsigned propertyName) +{ + JSCallbackObject* thisObject = jsCast(cell); + return thisObject->methodTable()->deleteProperty(thisObject, exec, Identifier::from(exec, propertyName)); +} + +template +ConstructType JSCallbackObject::getConstructData(JSCell* cell, ConstructData& constructData) +{ + JSCallbackObject* thisObject = jsCast(cell); + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (jsClass->callAsConstructor) { + constructData.native.function = construct; + return 
ConstructType::Host; + } + } + return ConstructType::None; +} + +template +EncodedJSValue JSCallbackObject::construct(ExecState* exec) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSObject* constructor = exec->jsCallee(); + JSContextRef execRef = toRef(exec); + JSObjectRef constructorRef = toRef(constructor); + + for (JSClassRef jsClass = jsCast*>(constructor)->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectCallAsConstructorCallback callAsConstructor = jsClass->callAsConstructor) { + size_t argumentCount = exec->argumentCount(); + Vector arguments; + arguments.reserveInitialCapacity(argumentCount); + for (size_t i = 0; i < argumentCount; ++i) + arguments.uncheckedAppend(toRef(exec, exec->uncheckedArgument(i))); + JSValueRef exception = 0; + JSObject* result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = toJS(callAsConstructor(execRef, constructorRef, argumentCount, arguments.data(), &exception)); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + return JSValue::encode(result); + } + } + + RELEASE_ASSERT_NOT_REACHED(); // getConstructData should prevent us from reaching here + return JSValue::encode(JSValue()); +} + +template +bool JSCallbackObject::customHasInstance(JSObject* object, ExecState* exec, JSValue value) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObject = jsCast(object); + JSContextRef execRef = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectHasInstanceCallback hasInstance = jsClass->hasInstance) { + JSValueRef valueRef = toRef(exec, value); + JSValueRef exception = 0; + bool result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = hasInstance(execRef, thisRef, valueRef, &exception); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + return result; + } + } + return false; +} + +template +CallType JSCallbackObject::getCallData(JSCell* cell, CallData& callData) +{ + JSCallbackObject* thisObject = jsCast(cell); + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (jsClass->callAsFunction) { + callData.native.function = call; + return CallType::Host; + } + } + return CallType::None; +} + +template +EncodedJSValue JSCallbackObject::call(ExecState* exec) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSContextRef execRef = toRef(exec); + JSObjectRef functionRef = toRef(exec->jsCallee()); + JSObjectRef thisObjRef = toRef(jsCast(exec->thisValue().toThis(exec, NotStrictMode))); + + for (JSClassRef jsClass = jsCast*>(toJS(functionRef))->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectCallAsFunctionCallback callAsFunction = jsClass->callAsFunction) { + size_t argumentCount = exec->argumentCount(); + Vector arguments; + arguments.reserveInitialCapacity(argumentCount); + for (size_t i = 0; i < argumentCount; ++i) + arguments.uncheckedAppend(toRef(exec, exec->uncheckedArgument(i))); + JSValueRef exception = 0; + JSValue result; + { + JSLock::DropAllLocks dropAllLocks(exec); + result = toJS(exec, callAsFunction(execRef, functionRef, thisObjRef, argumentCount, arguments.data(), &exception)); + } + if (exception) + throwException(exec, scope, toJS(exec, exception)); + return JSValue::encode(result); + } + } + + RELEASE_ASSERT_NOT_REACHED(); // getCallData should prevent us from reaching here + return 
JSValue::encode(JSValue()); +} + +template +void JSCallbackObject::getOwnNonIndexPropertyNames(JSObject* object, ExecState* exec, PropertyNameArray& propertyNames, EnumerationMode mode) +{ + JSCallbackObject* thisObject = jsCast(object); + JSContextRef execRef = toRef(exec); + JSObjectRef thisRef = toRef(thisObject); + + for (JSClassRef jsClass = thisObject->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectGetPropertyNamesCallback getPropertyNames = jsClass->getPropertyNames) { + JSLock::DropAllLocks dropAllLocks(exec); + getPropertyNames(execRef, thisRef, toRef(&propertyNames)); + } + + if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { + typedef OpaqueJSClassStaticValuesTable::const_iterator iterator; + iterator end = staticValues->end(); + for (iterator it = staticValues->begin(); it != end; ++it) { + StringImpl* name = it->key.get(); + StaticValueEntry* entry = it->value.get(); + if (entry->getProperty && (!(entry->attributes & kJSPropertyAttributeDontEnum) || mode.includeDontEnumProperties())) { + ASSERT(!name->isSymbol()); + propertyNames.add(Identifier::fromString(exec, String(name))); + } + } + } + + if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { + typedef OpaqueJSClassStaticFunctionsTable::const_iterator iterator; + iterator end = staticFunctions->end(); + for (iterator it = staticFunctions->begin(); it != end; ++it) { + StringImpl* name = it->key.get(); + StaticFunctionEntry* entry = it->value.get(); + if (!(entry->attributes & kJSPropertyAttributeDontEnum) || mode.includeDontEnumProperties()) { + ASSERT(!name->isSymbol()); + propertyNames.add(Identifier::fromString(exec, String(name))); + } + } + } + } + + Parent::getOwnNonIndexPropertyNames(thisObject, exec, propertyNames, mode); +} + +template +void JSCallbackObject::setPrivate(void* data) +{ + m_callbackObjectData->privateData = data; +} + +template +void* JSCallbackObject::getPrivate() +{ + return m_callbackObjectData->privateData; +} + +template +bool JSCallbackObject::inherits(JSClassRef c) const +{ + for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) { + if (jsClass == c) + return true; + } + return false; +} + +template +JSValue JSCallbackObject::getStaticValue(ExecState* exec, PropertyName propertyName) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSObjectRef thisRef = toRef(this); + + if (StringImpl* name = propertyName.uid()) { + for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) { + if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { + if (StaticValueEntry* entry = staticValues->get(name)) { + if (JSObjectGetPropertyCallback getProperty = entry->getProperty) { + JSValueRef exception = 0; + JSValueRef value; + { + JSLock::DropAllLocks dropAllLocks(exec); + value = getProperty(toRef(exec), thisRef, entry->propertyNameRef.get(), &exception); + } + if (exception) { + throwException(exec, scope, toJS(exec, exception)); + return jsUndefined(); + } + if (value) + return toJS(exec, value); + } + } + } + } + } + + return JSValue(); +} + +template +EncodedJSValue JSCallbackObject::staticFunctionGetter(ExecState* exec, EncodedJSValue thisValue, PropertyName propertyName) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObj = asCallbackObject(thisValue); + + // Check for cached or override property. 
+ PropertySlot slot2(thisObj, PropertySlot::InternalMethodType::VMInquiry); + if (Parent::getOwnPropertySlot(thisObj, exec, propertyName, slot2)) + return JSValue::encode(slot2.getValue(exec, propertyName)); + + if (StringImpl* name = propertyName.uid()) { + for (JSClassRef jsClass = thisObj->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (OpaqueJSClassStaticFunctionsTable* staticFunctions = jsClass->staticFunctions(exec)) { + if (StaticFunctionEntry* entry = staticFunctions->get(name)) { + if (JSObjectCallAsFunctionCallback callAsFunction = entry->callAsFunction) { + JSObject* o = JSCallbackFunction::create(vm, thisObj->globalObject(), callAsFunction, name); + thisObj->putDirect(vm, propertyName, o, entry->attributes); + return JSValue::encode(o); + } + } + } + } + } + + return JSValue::encode(throwException(exec, scope, createReferenceError(exec, ASCIILiteral("Static function property defined with NULL callAsFunction callback.")))); +} + +template +EncodedJSValue JSCallbackObject::callbackGetter(ExecState* exec, EncodedJSValue thisValue, PropertyName propertyName) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + + JSCallbackObject* thisObj = asCallbackObject(thisValue); + + JSObjectRef thisRef = toRef(thisObj); + RefPtr propertyNameRef; + + if (StringImpl* name = propertyName.uid()) { + for (JSClassRef jsClass = thisObj->classRef(); jsClass; jsClass = jsClass->parentClass) { + if (JSObjectGetPropertyCallback getProperty = jsClass->getProperty) { + if (!propertyNameRef) + propertyNameRef = OpaqueJSString::create(name); + JSValueRef exception = 0; + JSValueRef value; + { + JSLock::DropAllLocks dropAllLocks(exec); + value = getProperty(toRef(exec), thisRef, propertyNameRef.get(), &exception); + } + if (exception) { + throwException(exec, scope, toJS(exec, exception)); + return JSValue::encode(jsUndefined()); + } + if (value) + return JSValue::encode(toJS(exec, value)); + } + } + } + + return JSValue::encode(throwException(exec, scope, createReferenceError(exec, ASCIILiteral("hasProperty callback returned true for a property that doesn't exist.")))); +} + +} // namespace JSC diff --git a/API/JSClassRef.cpp b/API/JSClassRef.cpp new file mode 100644 index 0000000..eb525f1 --- /dev/null +++ b/API/JSClassRef.cpp @@ -0,0 +1,204 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSClassRef.h" + +#include "APICast.h" +#include "Identifier.h" +#include "InitializeThreading.h" +#include "JSCallbackObject.h" +#include "JSGlobalObject.h" +#include "JSObjectRef.h" +#include "ObjectPrototype.h" +#include "JSCInlines.h" +#include +#include + +using namespace std; +using namespace JSC; +using namespace WTF::Unicode; + +const JSClassDefinition kJSClassDefinitionEmpty = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + +OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass* protoClass) + : parentClass(definition->parentClass) + , prototypeClass(0) + , initialize(definition->initialize) + , finalize(definition->finalize) + , hasProperty(definition->hasProperty) + , getProperty(definition->getProperty) + , setProperty(definition->setProperty) + , deleteProperty(definition->deleteProperty) + , getPropertyNames(definition->getPropertyNames) + , callAsFunction(definition->callAsFunction) + , callAsConstructor(definition->callAsConstructor) + , hasInstance(definition->hasInstance) + , convertToType(definition->convertToType) + , m_className(String::fromUTF8(definition->className)) +{ + initializeThreading(); + + if (const JSStaticValue* staticValue = definition->staticValues) { + m_staticValues = std::make_unique(); + while (staticValue->name) { + String valueName = String::fromUTF8(staticValue->name); + if (!valueName.isNull()) + m_staticValues->set(valueName.impl(), std::make_unique(staticValue->getProperty, staticValue->setProperty, staticValue->attributes, valueName)); + ++staticValue; + } + } + + if (const JSStaticFunction* staticFunction = definition->staticFunctions) { + m_staticFunctions = std::make_unique(); + while (staticFunction->name) { + String functionName = String::fromUTF8(staticFunction->name); + if (!functionName.isNull()) + m_staticFunctions->set(functionName.impl(), std::make_unique(staticFunction->callAsFunction, staticFunction->attributes)); + ++staticFunction; + } + } + + if (protoClass) + prototypeClass = JSClassRetain(protoClass); +} + +OpaqueJSClass::~OpaqueJSClass() +{ + // The empty string is shared across threads & is an identifier, in all other cases we should have done a deep copy in className(), below. 
+    ASSERT(!m_className.length() || !m_className.impl()->isAtomic());
+
+#ifndef NDEBUG
+    if (m_staticValues) {
+        OpaqueJSClassStaticValuesTable::const_iterator end = m_staticValues->end();
+        for (OpaqueJSClassStaticValuesTable::const_iterator it = m_staticValues->begin(); it != end; ++it)
+            ASSERT(!it->key->isAtomic());
+    }
+
+    if (m_staticFunctions) {
+        OpaqueJSClassStaticFunctionsTable::const_iterator end = m_staticFunctions->end();
+        for (OpaqueJSClassStaticFunctionsTable::const_iterator it = m_staticFunctions->begin(); it != end; ++it)
+            ASSERT(!it->key->isAtomic());
+    }
+#endif
+
+    if (prototypeClass)
+        JSClassRelease(prototypeClass);
+}
+
+Ref<OpaqueJSClass> OpaqueJSClass::createNoAutomaticPrototype(const JSClassDefinition* definition)
+{
+    return adoptRef(*new OpaqueJSClass(definition, 0));
+}
+
+Ref<OpaqueJSClass> OpaqueJSClass::create(const JSClassDefinition* clientDefinition)
+{
+    JSClassDefinition definition = *clientDefinition; // Avoid modifying client copy.
+
+    JSClassDefinition protoDefinition = kJSClassDefinitionEmpty;
+    protoDefinition.finalize = 0;
+    swap(definition.staticFunctions, protoDefinition.staticFunctions); // Move static functions to the prototype.
+
+    // We are supposed to use JSClassRetain/Release but since we know that we currently have
+    // the only reference to this class object we cheat and use a RefPtr instead.
+    RefPtr<OpaqueJSClass> protoClass = adoptRef(new OpaqueJSClass(&protoDefinition, 0));
+    return adoptRef(*new OpaqueJSClass(&definition, protoClass.get()));
+}
+
+OpaqueJSClassContextData::OpaqueJSClassContextData(JSC::VM&, OpaqueJSClass* jsClass)
+    : m_class(jsClass)
+{
+    if (jsClass->m_staticValues) {
+        staticValues = std::make_unique<OpaqueJSClassStaticValuesTable>();
+        OpaqueJSClassStaticValuesTable::const_iterator end = jsClass->m_staticValues->end();
+        for (OpaqueJSClassStaticValuesTable::const_iterator it = jsClass->m_staticValues->begin(); it != end; ++it) {
+            ASSERT(!it->key->isAtomic());
+            String valueName = it->key->isolatedCopy();
+            staticValues->add(valueName.impl(), std::make_unique<StaticValueEntry>(it->value->getProperty, it->value->setProperty, it->value->attributes, valueName));
+        }
+    }
+
+    if (jsClass->m_staticFunctions) {
+        staticFunctions = std::make_unique<OpaqueJSClassStaticFunctionsTable>();
+        OpaqueJSClassStaticFunctionsTable::const_iterator end = jsClass->m_staticFunctions->end();
+        for (OpaqueJSClassStaticFunctionsTable::const_iterator it = jsClass->m_staticFunctions->begin(); it != end; ++it) {
+            ASSERT(!it->key->isAtomic());
+            staticFunctions->add(it->key->isolatedCopy(), std::make_unique<StaticFunctionEntry>(it->value->callAsFunction, it->value->attributes));
+        }
+    }
+}
+
+OpaqueJSClassContextData& OpaqueJSClass::contextData(ExecState* exec)
+{
+    std::unique_ptr<OpaqueJSClassContextData>& contextData = exec->lexicalGlobalObject()->opaqueJSClassData().add(this, nullptr).iterator->value;
+    if (!contextData)
+        contextData = std::make_unique<OpaqueJSClassContextData>(exec->vm(), this);
+    return *contextData;
+}
+
+String OpaqueJSClass::className()
+{
+    // Make a deep copy, so that the caller has no chance to put the original into AtomicStringTable.
+    return m_className.isolatedCopy();
+}
+
+OpaqueJSClassStaticValuesTable* OpaqueJSClass::staticValues(JSC::ExecState* exec)
+{
+    return contextData(exec).staticValues.get();
+}
+
+OpaqueJSClassStaticFunctionsTable* OpaqueJSClass::staticFunctions(JSC::ExecState* exec)
+{
+    return contextData(exec).staticFunctions.get();
+}
+
+JSObject* OpaqueJSClass::prototype(ExecState* exec)
+{
+    /* Class (C++) and prototype (JS) inheritance are parallel, so:
+     *     (C++)      |        (JS)
+     *   ParentClass  |   ParentClassPrototype
+     *       ^        |          ^
+     *       |        |          |
+     *  DerivedClass  |  DerivedClassPrototype
+     */
+
+    if (!prototypeClass)
+        return 0;
+
+    OpaqueJSClassContextData& jsClassData = contextData(exec);
+
+    if (JSObject* prototype = jsClassData.cachedPrototype.get())
+        return prototype;
+
+    // Recursive, but should be good enough for our purposes
+    JSObject* prototype = JSCallbackObject<JSDestructibleObject>::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackObjectStructure(), prototypeClass, &jsClassData); // set jsClassData as the object's private data, so it can clear our reference on destruction
+    if (parentClass) {
+        if (JSObject* parentPrototype = parentClass->prototype(exec))
+            prototype->setPrototypeDirect(exec->vm(), parentPrototype);
+    }
+
+    jsClassData.cachedPrototype = Weak<JSObject>(prototype);
+    return prototype;
+}
diff --git a/API/JSClassRef.h b/API/JSClassRef.h
new file mode 100644
index 0000000..fa024d3
--- /dev/null
+++ b/API/JSClassRef.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2006 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef JSClassRef_h +#define JSClassRef_h + +#include "OpaqueJSString.h" +#include "Protect.h" +#include "Weak.h" +#include +#include +#include + +struct StaticValueEntry { + WTF_MAKE_FAST_ALLOCATED; +public: + StaticValueEntry(JSObjectGetPropertyCallback _getProperty, JSObjectSetPropertyCallback _setProperty, JSPropertyAttributes _attributes, String& propertyName) + : getProperty(_getProperty), setProperty(_setProperty), attributes(_attributes), propertyNameRef(OpaqueJSString::create(propertyName)) + { + } + + JSObjectGetPropertyCallback getProperty; + JSObjectSetPropertyCallback setProperty; + JSPropertyAttributes attributes; + RefPtr propertyNameRef; +}; + +struct StaticFunctionEntry { + WTF_MAKE_FAST_ALLOCATED; +public: + StaticFunctionEntry(JSObjectCallAsFunctionCallback _callAsFunction, JSPropertyAttributes _attributes) + : callAsFunction(_callAsFunction), attributes(_attributes) + { + } + + JSObjectCallAsFunctionCallback callAsFunction; + JSPropertyAttributes attributes; +}; + +typedef HashMap, std::unique_ptr> OpaqueJSClassStaticValuesTable; +typedef HashMap, std::unique_ptr> OpaqueJSClassStaticFunctionsTable; + +struct OpaqueJSClass; + +// An OpaqueJSClass (JSClass) is created without a context, so it can be used with any context, even across context groups. +// This structure holds data members that vary across context groups. +struct OpaqueJSClassContextData { + WTF_MAKE_NONCOPYABLE(OpaqueJSClassContextData); WTF_MAKE_FAST_ALLOCATED; +public: + OpaqueJSClassContextData(JSC::VM&, OpaqueJSClass*); + + // It is necessary to keep OpaqueJSClass alive because of the following rare scenario: + // 1. A class is created and used, so its context data is stored in VM hash map. + // 2. The class is released, and when all JS objects that use it are collected, OpaqueJSClass + // is deleted (that's the part prevented by this RefPtr). + // 3. Another class is created at the same address. + // 4. When it is used, the old context data is found in VM and used. + RefPtr m_class; + + std::unique_ptr staticValues; + std::unique_ptr staticFunctions; + JSC::Weak cachedPrototype; +}; + +struct OpaqueJSClass : public ThreadSafeRefCounted { + static Ref create(const JSClassDefinition*); + static Ref createNoAutomaticPrototype(const JSClassDefinition*); + JS_EXPORT_PRIVATE ~OpaqueJSClass(); + + String className(); + OpaqueJSClassStaticValuesTable* staticValues(JSC::ExecState*); + OpaqueJSClassStaticFunctionsTable* staticFunctions(JSC::ExecState*); + JSC::JSObject* prototype(JSC::ExecState*); + + OpaqueJSClass* parentClass; + OpaqueJSClass* prototypeClass; + + JSObjectInitializeCallback initialize; + JSObjectFinalizeCallback finalize; + JSObjectHasPropertyCallback hasProperty; + JSObjectGetPropertyCallback getProperty; + JSObjectSetPropertyCallback setProperty; + JSObjectDeletePropertyCallback deleteProperty; + JSObjectGetPropertyNamesCallback getPropertyNames; + JSObjectCallAsFunctionCallback callAsFunction; + JSObjectCallAsConstructorCallback callAsConstructor; + JSObjectHasInstanceCallback hasInstance; + JSObjectConvertToTypeCallback convertToType; + +private: + friend struct OpaqueJSClassContextData; + + OpaqueJSClass(); + OpaqueJSClass(const OpaqueJSClass&); + OpaqueJSClass(const JSClassDefinition*, OpaqueJSClass* protoClass); + + OpaqueJSClassContextData& contextData(JSC::ExecState*); + + // Strings in these data members should not be put into any AtomicStringTable. 
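A minimal sketch of the client-side setup that feeds these tables: a JSClassDefinition with one static value is handed to JSClassCreate(), which is backed by OpaqueJSClass::create() earlier in this patch. The class name, property name, and callback below are invented for illustration; the types and functions are the public JavaScriptCore C API.

    // Requires: #include <JavaScriptCore/JavaScriptCore.h>
    static JSValueRef getPi(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
    {
        return JSValueMakeNumber(ctx, 3.14159265358979);   // surfaced later through JSCallbackObject::getStaticValue()
    }

    static const JSStaticValue kMyMathStaticValues[] = {
        { "pi", getPi, 0, kJSPropertyAttributeReadOnly | kJSPropertyAttributeDontDelete },
        { 0, 0, 0, 0 }
    };

    static JSClassRef createMyMathClass(void)
    {
        JSClassDefinition definition = kJSClassDefinitionEmpty; // defined in JSClassRef.cpp above
        definition.className = "MyMath";
        definition.staticValues = kMyMathStaticValues;
        return JSClassCreate(&definition);                      // caller releases with JSClassRelease()
    }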
+ String m_className; + std::unique_ptr m_staticValues; + std::unique_ptr m_staticFunctions; +}; + +#endif // JSClassRef_h diff --git a/API/JSContext.h b/API/JSContext.h new file mode 100644 index 0000000..194e352 --- /dev/null +++ b/API/JSContext.h @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContext_h +#define JSContext_h + +#include +#include + +#if JSC_OBJC_API_ENABLED + +@class JSVirtualMachine, JSValue; + +/*! +@interface +@discussion A JSContext is a JavaScript execution environment. All + JavaScript execution takes place within a context, and all JavaScript values + are tied to a context. +*/ +NS_CLASS_AVAILABLE(10_9, 7_0) +@interface JSContext : NSObject + +/*! +@methodgroup Creating New JSContexts +*/ +/*! +@method +@abstract Create a JSContext. +@result The new context. +*/ +- (instancetype)init; + +/*! +@method +@abstract Create a JSContext in the specified virtual machine. +@param virtualMachine The JSVirtualMachine in which the context will be created. +@result The new context. +*/ +- (instancetype)initWithVirtualMachine:(JSVirtualMachine *)virtualMachine; + +/*! +@methodgroup Evaluating Scripts +*/ +/*! +@method +@abstract Evaluate a string of JavaScript code. +@param script A string containing the JavaScript code to evaluate. +@result The last value generated by the script. +*/ +- (JSValue *)evaluateScript:(NSString *)script; + +/*! +@method +@abstract Evaluate a string of JavaScript code, with a URL for the script's source file. +@param script A string containing the JavaScript code to evaluate. +@param sourceURL A URL for the script's source file. Used by debuggers and when reporting exceptions. This parameter is informative only: it does not change the behavior of the script. +@result The last value generated by the script. +*/ +- (JSValue *)evaluateScript:(NSString *)script withSourceURL:(NSURL *)sourceURL NS_AVAILABLE(10_10, 8_0); + +/*! +@methodgroup Callback Accessors +*/ +/*! +@method +@abstract Get the JSContext that is currently executing. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's context. 
Outside of + a callback from JavaScript this method will return nil. +@result The currently executing JSContext or nil if there isn't one. +*/ ++ (JSContext *)currentContext; + +/*! +@method +@abstract Get the JavaScript function that is currently executing. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's context. Outside of + a callback from JavaScript this method will return nil. +@result The currently executing JavaScript function or nil if there isn't one. +*/ ++ (JSValue *)currentCallee NS_AVAILABLE(10_10, 8_0); + +/*! +@method +@abstract Get the this value of the currently executing method. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's this value. Outside + of a callback from JavaScript this method will return nil. +@result The current this value or nil if there isn't one. +*/ ++ (JSValue *)currentThis; + +/*! +@method +@abstract Get the arguments to the current callback. +@discussion This method may be called from within an Objective-C block or method invoked + as a callback from JavaScript to retrieve the callback's arguments, objects + in the returned array are instances of JSValue. Outside of a callback from + JavaScript this method will return nil. +@result An NSArray of the arguments nil if there is no current callback. +*/ ++ (NSArray *)currentArguments; + +/*! +@functiongroup Global Properties +*/ + +/*! +@property +@abstract Get the global object of the context. +@discussion This method retrieves the global object of the JavaScript execution context. + Instances of JSContext originating from WebKit will return a reference to the + WindowProxy object. +@result The global object. +*/ +@property (readonly, strong) JSValue *globalObject; + +/*! +@property +@discussion The exception property may be used to throw an exception to JavaScript. + + Before a callback is made from JavaScript to an Objective-C block or method, + the prior value of the exception property will be preserved and the property + will be set to nil. After the callback has completed the new value of the + exception property will be read, and prior value restored. If the new value + of exception is not nil, the callback will result in that value being thrown. + + This property may also be used to check for uncaught exceptions arising from + API function calls (since the default behaviour of exceptionHandler is to + assign an uncaught exception to this property). +*/ +@property (strong) JSValue *exception; + +/*! +@property +@discussion If a call to an API function results in an uncaught JavaScript exception, the + exceptionHandler block will be invoked. The default implementation for the + exception handler will store the exception to the exception property on + context. As a consequence the default behaviour is for uncaught exceptions + occurring within a callback from JavaScript to be rethrown upon return. + Setting this value to nil will cause all exceptions occurring + within a callback from JavaScript to be silently caught. +*/ +@property (copy) void(^exceptionHandler)(JSContext *context, JSValue *exception); + +/*! +@property +@discussion All instances of JSContext are associated with a JSVirtualMachine. +*/ +@property (readonly, strong) JSVirtualMachine *virtualMachine; + +/*! +@property +@discussion Name of the JSContext. Exposed when remote debugging the context. 
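A minimal usage sketch tying these properties together (the script text and the "Example" name are invented; the rest is API declared in this header and in JSValue.h):

    JSContext *context = [[JSContext alloc] init];
    context.name = @"Example";                           // visible when remote debugging the context
    context.exceptionHandler = ^(JSContext *ctx, JSValue *exception) {
        NSLog(@"Unhandled exception: %@", exception);    // replaces the default handler, which stores it in ctx.exception
    };
    JSValue *result = [context evaluateScript:@"6 * 7"];
    NSLog(@"%d", [result toInt32]);                      // 42
    context[@"greeting"] = @"hello";                     // keyed subscripting sets a property on the global object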
+*/ +@property (copy) NSString *name NS_AVAILABLE(10_10, 8_0); + +@end + +/*! +@category +@discussion Instances of JSContext implement the following methods in order to enable + support for subscript access by key and index, for example: + +@textblock + JSContext *context; + JSValue *v = context[@"X"]; // Get value for "X" from the global object. + context[@"Y"] = v; // Assign 'v' to "Y" on the global object. +@/textblock + + An object key passed as a subscript will be converted to a JavaScript value, + and then the value converted to a string used to resolve a property of the + global object. +*/ +@interface JSContext (SubscriptSupport) + +/*! +@method +@abstract Get a particular property on the global object. +@result The JSValue for the global object's property. +*/ +- (JSValue *)objectForKeyedSubscript:(id)key; + +/*! +@method +@abstract Set a particular property on the global object. +*/ +- (void)setObject:(id)object forKeyedSubscript:(NSObject *)key; + +@end + +/*! +@category +@discussion These functions are for bridging between the C API and the Objective-C API. +*/ +@interface JSContext (JSContextRefSupport) + +/*! +@method +@abstract Create a JSContext, wrapping its C API counterpart. +@result The JSContext equivalent of the provided JSGlobalContextRef. +*/ ++ (JSContext *)contextWithJSGlobalContextRef:(JSGlobalContextRef)jsGlobalContextRef; + +/*! +@property +@abstract Get the C API counterpart wrapped by a JSContext. +@result The C API equivalent of this JSContext. +*/ +@property (readonly) JSGlobalContextRef JSGlobalContextRef; +@end + +#endif + +#endif // JSContext_h diff --git a/API/JSContext.mm b/API/JSContext.mm new file mode 100644 index 0000000..3291622 --- /dev/null +++ b/API/JSContext.mm @@ -0,0 +1,354 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" + +#import "APICast.h" +#import "JSCInlines.h" +#import "JSContextInternal.h" +#import "JSContextPrivate.h" +#import "JSContextRefInternal.h" +#import "JSGlobalObject.h" +#import "JSValueInternal.h" +#import "JSVirtualMachineInternal.h" +#import "JSWrapperMap.h" +#import "JavaScriptCore.h" +#import "ObjcRuntimeExtras.h" +#import "StrongInlines.h" + +#if JSC_OBJC_API_ENABLED + +@implementation JSContext { + JSVirtualMachine *m_virtualMachine; + JSGlobalContextRef m_context; + JSWrapperMap *m_wrapperMap; + JSC::Strong m_exception; +} + +@synthesize exceptionHandler; + +- (JSGlobalContextRef)JSGlobalContextRef +{ + return m_context; +} + +- (instancetype)init +{ + return [self initWithVirtualMachine:[[[JSVirtualMachine alloc] init] autorelease]]; +} + +- (instancetype)initWithVirtualMachine:(JSVirtualMachine *)virtualMachine +{ + self = [super init]; + if (!self) + return nil; + + m_virtualMachine = [virtualMachine retain]; + m_context = JSGlobalContextCreateInGroup(getGroupFromVirtualMachine(virtualMachine), 0); + m_wrapperMap = [[JSWrapperMap alloc] initWithContext:self]; + + self.exceptionHandler = ^(JSContext *context, JSValue *exceptionValue) { + context.exception = exceptionValue; + }; + + [m_virtualMachine addContext:self forGlobalContextRef:m_context]; + + return self; +} + +- (void)dealloc +{ + m_exception.clear(); + [m_wrapperMap release]; + JSGlobalContextRelease(m_context); + [m_virtualMachine release]; + [self.exceptionHandler release]; + [super dealloc]; +} + +- (JSValue *)evaluateScript:(NSString *)script +{ + return [self evaluateScript:script withSourceURL:nil]; +} + +- (JSValue *)evaluateScript:(NSString *)script withSourceURL:(NSURL *)sourceURL +{ + JSValueRef exceptionValue = nullptr; + JSStringRef scriptJS = JSStringCreateWithCFString((CFStringRef)script); + JSStringRef sourceURLJS = sourceURL ? JSStringCreateWithCFString((CFStringRef)[sourceURL absoluteString]) : nullptr; + JSValueRef result = JSEvaluateScript(m_context, scriptJS, nullptr, sourceURLJS, 0, &exceptionValue); + if (sourceURLJS) + JSStringRelease(sourceURLJS); + JSStringRelease(scriptJS); + + if (exceptionValue) + return [self valueFromNotifyException:exceptionValue]; + + return [JSValue valueWithJSValueRef:result inContext:self]; +} + +- (void)setException:(JSValue *)value +{ + JSC::JSLockHolder locker(toJS(m_context)); + if (value) + m_exception.set(toJS(m_context)->vm(), toJS(JSValueToObject(m_context, valueInternalValue(value), 0))); + else + m_exception.clear(); +} + +- (JSValue *)exception +{ + if (!m_exception) + return nil; + return [JSValue valueWithJSValueRef:toRef(m_exception.get()) inContext:self]; +} + +- (JSWrapperMap *)wrapperMap +{ + return m_wrapperMap; +} + +- (JSValue *)globalObject +{ + return [JSValue valueWithJSValueRef:JSContextGetGlobalObject(m_context) inContext:self]; +} + ++ (JSContext *)currentContext +{ + WTFThreadData& threadData = wtfThreadData(); + CallbackData *entry = (CallbackData *)threadData.m_apiData; + return entry ? 
entry->context : nil; +} + ++ (JSValue *)currentThis +{ + WTFThreadData& threadData = wtfThreadData(); + CallbackData *entry = (CallbackData *)threadData.m_apiData; + if (!entry) + return nil; + return [JSValue valueWithJSValueRef:entry->thisValue inContext:[JSContext currentContext]]; +} + ++ (JSValue *)currentCallee +{ + WTFThreadData& threadData = wtfThreadData(); + CallbackData *entry = (CallbackData *)threadData.m_apiData; + if (!entry) + return nil; + return [JSValue valueWithJSValueRef:entry->calleeValue inContext:[JSContext currentContext]]; +} + ++ (NSArray *)currentArguments +{ + WTFThreadData& threadData = wtfThreadData(); + CallbackData *entry = (CallbackData *)threadData.m_apiData; + + if (!entry) + return nil; + + if (!entry->currentArguments) { + JSContext *context = [JSContext currentContext]; + size_t count = entry->argumentCount; + JSValue * argumentArray[count]; + for (size_t i =0; i < count; ++i) + argumentArray[i] = [JSValue valueWithJSValueRef:entry->arguments[i] inContext:context]; + entry->currentArguments = [[NSArray alloc] initWithObjects:argumentArray count:count]; + } + + return entry->currentArguments; +} + +- (JSVirtualMachine *)virtualMachine +{ + return m_virtualMachine; +} + +- (NSString *)name +{ + JSStringRef name = JSGlobalContextCopyName(m_context); + if (!name) + return nil; + + return (NSString *)adoptCF(JSStringCopyCFString(kCFAllocatorDefault, name)).autorelease(); +} + +- (void)setName:(NSString *)name +{ + JSStringRef nameJS = name ? JSStringCreateWithCFString((CFStringRef)[[name copy] autorelease]) : nullptr; + JSGlobalContextSetName(m_context, nameJS); + if (nameJS) + JSStringRelease(nameJS); +} + +- (BOOL)_remoteInspectionEnabled +{ + return JSGlobalContextGetRemoteInspectionEnabled(m_context); +} + +- (void)_setRemoteInspectionEnabled:(BOOL)enabled +{ + JSGlobalContextSetRemoteInspectionEnabled(m_context, enabled); +} + +- (BOOL)_includesNativeCallStackWhenReportingExceptions +{ + return JSGlobalContextGetIncludesNativeCallStackWhenReportingExceptions(m_context); +} + +- (void)_setIncludesNativeCallStackWhenReportingExceptions:(BOOL)includeNativeCallStack +{ + JSGlobalContextSetIncludesNativeCallStackWhenReportingExceptions(m_context, includeNativeCallStack); +} + +- (CFRunLoopRef)_debuggerRunLoop +{ + return JSGlobalContextGetDebuggerRunLoop(m_context); +} + +- (void)_setDebuggerRunLoop:(CFRunLoopRef)runLoop +{ + JSGlobalContextSetDebuggerRunLoop(m_context, runLoop); +} + +@end + +@implementation JSContext(SubscriptSupport) + +- (JSValue *)objectForKeyedSubscript:(id)key +{ + return [self globalObject][key]; +} + +- (void)setObject:(id)object forKeyedSubscript:(NSObject *)key +{ + [self globalObject][key] = object; +} + +@end + +@implementation JSContext (Internal) + +- (instancetype)initWithGlobalContextRef:(JSGlobalContextRef)context +{ + self = [super init]; + if (!self) + return nil; + + JSC::JSGlobalObject* globalObject = toJS(context)->lexicalGlobalObject(); + m_virtualMachine = [[JSVirtualMachine virtualMachineWithContextGroupRef:toRef(&globalObject->vm())] retain]; + ASSERT(m_virtualMachine); + m_context = JSGlobalContextRetain(context); + m_wrapperMap = [[JSWrapperMap alloc] initWithContext:self]; + + self.exceptionHandler = ^(JSContext *context, JSValue *exceptionValue) { + context.exception = exceptionValue; + }; + + [m_virtualMachine addContext:self forGlobalContextRef:m_context]; + + return self; +} + +- (void)notifyException:(JSValueRef)exceptionValue +{ + self.exceptionHandler(self, [JSValue valueWithJSValueRef:exceptionValue 
inContext:self]); +} + +- (JSValue *)valueFromNotifyException:(JSValueRef)exceptionValue +{ + [self notifyException:exceptionValue]; + return [JSValue valueWithUndefinedInContext:self]; +} + +- (BOOL)boolFromNotifyException:(JSValueRef)exceptionValue +{ + [self notifyException:exceptionValue]; + return NO; +} + +- (void)beginCallbackWithData:(CallbackData *)callbackData calleeValue:(JSValueRef)calleeValue thisValue:(JSValueRef)thisValue argumentCount:(size_t)argumentCount arguments:(const JSValueRef *)arguments +{ + WTFThreadData& threadData = wtfThreadData(); + [self retain]; + CallbackData *prevStack = (CallbackData *)threadData.m_apiData; + *callbackData = (CallbackData){ prevStack, self, [self.exception retain], calleeValue, thisValue, argumentCount, arguments, nil }; + threadData.m_apiData = callbackData; + self.exception = nil; +} + +- (void)endCallbackWithData:(CallbackData *)callbackData +{ + WTFThreadData& threadData = wtfThreadData(); + self.exception = callbackData->preservedException; + [callbackData->preservedException release]; + [callbackData->currentArguments release]; + threadData.m_apiData = callbackData->next; + [self release]; +} + +- (JSValue *)wrapperForObjCObject:(id)object +{ + JSC::JSLockHolder locker(toJS(m_context)); + return [m_wrapperMap jsWrapperForObject:object]; +} + +- (JSValue *)wrapperForJSObject:(JSValueRef)value +{ + JSC::JSLockHolder locker(toJS(m_context)); + return [m_wrapperMap objcWrapperForJSValueRef:value]; +} + ++ (JSContext *)contextWithJSGlobalContextRef:(JSGlobalContextRef)globalContext +{ + JSVirtualMachine *virtualMachine = [JSVirtualMachine virtualMachineWithContextGroupRef:toRef(&toJS(globalContext)->vm())]; + JSContext *context = [virtualMachine contextForGlobalContextRef:globalContext]; + if (!context) + context = [[[JSContext alloc] initWithGlobalContextRef:globalContext] autorelease]; + return context; +} + +@end + +WeakContextRef::WeakContextRef(JSContext *context) +{ + objc_initWeak(&m_weakContext, context); +} + +WeakContextRef::~WeakContextRef() +{ + objc_destroyWeak(&m_weakContext); +} + +JSContext * WeakContextRef::get() +{ + return objc_loadWeak(&m_weakContext); +} + +void WeakContextRef::set(JSContext *context) +{ + objc_storeWeak(&m_weakContext, context); +} + +#endif diff --git a/API/JSContextInternal.h b/API/JSContextInternal.h new file mode 100644 index 0000000..5308fbb --- /dev/null +++ b/API/JSContextInternal.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextInternal_h +#define JSContextInternal_h + +#import + +#if JSC_OBJC_API_ENABLED + +#import + +struct CallbackData { + CallbackData *next; + JSContext *context; + JSValue *preservedException; + JSValueRef calleeValue; + JSValueRef thisValue; + size_t argumentCount; + const JSValueRef *arguments; + NSArray *currentArguments; +}; + +class WeakContextRef { +public: + WeakContextRef(JSContext * = nil); + ~WeakContextRef(); + + JSContext * get(); + void set(JSContext *); + +private: + JSContext *m_weakContext; +}; + +@class JSWrapperMap; + +@interface JSContext(Internal) + +- (id)initWithGlobalContextRef:(JSGlobalContextRef)context; + +- (void)notifyException:(JSValueRef)exception; +- (JSValue *)valueFromNotifyException:(JSValueRef)exception; +- (BOOL)boolFromNotifyException:(JSValueRef)exception; + +- (void)beginCallbackWithData:(CallbackData *)callbackData calleeValue:(JSValueRef)calleeValue thisValue:(JSValueRef)thisValue argumentCount:(size_t)argumentCount arguments:(const JSValueRef *)arguments; +- (void)endCallbackWithData:(CallbackData *)callbackData; + +- (JSValue *)wrapperForObjCObject:(id)object; +- (JSValue *)wrapperForJSObject:(JSValueRef)value; + +@property (readonly, retain) JSWrapperMap *wrapperMap; + +@end + +#endif + +#endif // JSContextInternal_h diff --git a/API/JSContextPrivate.h b/API/JSContextPrivate.h new file mode 100644 index 0000000..7d1d0cb --- /dev/null +++ b/API/JSContextPrivate.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextPrivate_h +#define JSContextPrivate_h + +#if JSC_OBJC_API_ENABLED + +#import + +@interface JSContext(Private) + +/*! +@property +@discussion Remote inspection setting of the JSContext. Default value is YES. 
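A minimal usage sketch for the property declared just below (illustrative, not part of the original header; it assumes a freshly created JSContext):

    JSContext *context = [[JSContext alloc] init];
    context._remoteInspectionEnabled = NO; // opt this context out of remote Web Inspector attachment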
+*/ +@property (setter=_setRemoteInspectionEnabled:) BOOL _remoteInspectionEnabled NS_AVAILABLE(10_10, 8_0); + +/*! +@property +@discussion Set whether or not the native call stack is included when reporting exceptions. Default value is YES. +*/ +@property (setter=_setIncludesNativeCallStackWhenReportingExceptions:) BOOL _includesNativeCallStackWhenReportingExceptions NS_AVAILABLE(10_10, 8_0); + +/*! +@property +@discussion Set the run loop the Web Inspector debugger should use when evaluating JavaScript in the JSContext. +*/ +@property (setter=_setDebuggerRunLoop:) CFRunLoopRef _debuggerRunLoop NS_AVAILABLE(10_10, 8_0); + +@end + +#endif + +#endif // JSContextInternal_h diff --git a/API/JSContextRef.cpp b/API/JSContextRef.cpp new file mode 100644 index 0000000..13769df --- /dev/null +++ b/API/JSContextRef.cpp @@ -0,0 +1,431 @@ +/* + * Copyright (C) 2006, 2007, 2013, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "JSContextRef.h" +#include "JSContextRefInternal.h" + +#include "APICast.h" +#include "CallFrame.h" +#include "InitializeThreading.h" +#include "JSCallbackObject.h" +#include "JSClassRef.h" +#include "JSGlobalObject.h" +#include "JSObject.h" +#include "JSCInlines.h" +#include "SourceProvider.h" +#include "StackVisitor.h" +#include "Watchdog.h" +#include +#include + +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectDebuggable.h" +#include "JSGlobalObjectInspectorController.h" +#include "JSRemoteInspector.h" +#endif + +#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS) +#include "JSContextRefInspectorSupport.h" +#endif + +#if OS(DARWIN) +#include + +static const int32_t webkitFirstVersionWithConcurrentGlobalContexts = 0x2100500; // 528.5.0 +#endif + +using namespace JSC; + +// From the API's perspective, a context group remains alive iff +// (a) it has been JSContextGroupRetained +// OR +// (b) one of its contexts has been JSContextRetained + +JSContextGroupRef JSContextGroupCreate() +{ + initializeThreading(); + return toRef(&VM::createContextGroup().leakRef()); +} + +JSContextGroupRef JSContextGroupRetain(JSContextGroupRef group) +{ + toJS(group)->ref(); + return group; +} + +void JSContextGroupRelease(JSContextGroupRef group) +{ + VM& vm = *toJS(group); + + JSLockHolder locker(&vm); + vm.deref(); +} + +static bool internalScriptTimeoutCallback(ExecState* exec, void* callbackPtr, void* callbackData) +{ + JSShouldTerminateCallback callback = reinterpret_cast(callbackPtr); + JSContextRef contextRef = toRef(exec); + ASSERT(callback); + return callback(contextRef, callbackData); +} + +void JSContextGroupSetExecutionTimeLimit(JSContextGroupRef group, double limit, JSShouldTerminateCallback callback, void* callbackData) +{ + VM& vm = *toJS(group); + JSLockHolder locker(&vm); + Watchdog& watchdog = vm.ensureWatchdog(); + if (callback) { + void* callbackPtr = reinterpret_cast(callback); + watchdog.setTimeLimit(std::chrono::duration_cast(std::chrono::duration(limit)), internalScriptTimeoutCallback, callbackPtr, callbackData); + } else + watchdog.setTimeLimit(std::chrono::duration_cast(std::chrono::duration(limit))); +} + +void JSContextGroupClearExecutionTimeLimit(JSContextGroupRef group) +{ + VM& vm = *toJS(group); + JSLockHolder locker(&vm); + if (vm.watchdog()) + vm.watchdog()->setTimeLimit(Watchdog::noTimeLimit); +} + +// From the API's perspective, a global context remains alive iff it has been JSGlobalContextRetained. + +JSGlobalContextRef JSGlobalContextCreate(JSClassRef globalObjectClass) +{ + initializeThreading(); + +#if OS(DARWIN) + // If the application was linked before JSGlobalContextCreate was changed to use a unique VM, + // we use a shared one for backwards compatibility. + if (NSVersionOfLinkTimeLibrary("JavaScriptCore") <= webkitFirstVersionWithConcurrentGlobalContexts) { + return JSGlobalContextCreateInGroup(toRef(&VM::sharedInstance()), globalObjectClass); + } +#endif // OS(DARWIN) + + return JSGlobalContextCreateInGroup(0, globalObjectClass); +} + +JSGlobalContextRef JSGlobalContextCreateInGroup(JSContextGroupRef group, JSClassRef globalObjectClass) +{ + initializeThreading(); + + RefPtr vm = group ? 
PassRefPtr(toJS(group)) : VM::createContextGroup(); + + JSLockHolder locker(vm.get()); + + if (!globalObjectClass) { + JSGlobalObject* globalObject = JSGlobalObject::create(*vm, JSGlobalObject::createStructure(*vm, jsNull())); +#if ENABLE(REMOTE_INSPECTOR) + if (JSRemoteInspectorGetInspectionEnabledByDefault()) + globalObject->setRemoteDebuggingEnabled(true); +#endif + return JSGlobalContextRetain(toGlobalRef(globalObject->globalExec())); + } + + JSGlobalObject* globalObject = JSCallbackObject::create(*vm, globalObjectClass, JSCallbackObject::createStructure(*vm, 0, jsNull())); + ExecState* exec = globalObject->globalExec(); + JSValue prototype = globalObjectClass->prototype(exec); + if (!prototype) + prototype = jsNull(); + globalObject->resetPrototype(*vm, prototype); +#if ENABLE(REMOTE_INSPECTOR) + if (JSRemoteInspectorGetInspectionEnabledByDefault()) + globalObject->setRemoteDebuggingEnabled(true); +#endif + return JSGlobalContextRetain(toGlobalRef(exec)); +} + +JSGlobalContextRef JSGlobalContextRetain(JSGlobalContextRef ctx) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + VM& vm = exec->vm(); + gcProtect(exec->vmEntryGlobalObject()); + vm.ref(); + return ctx; +} + +void JSGlobalContextRelease(JSGlobalContextRef ctx) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + VM& vm = exec->vm(); + bool protectCountIsZero = Heap::heap(exec->vmEntryGlobalObject())->unprotect(exec->vmEntryGlobalObject()); + if (protectCountIsZero) + vm.heap.reportAbandonedObjectGraph(); + vm.deref(); +} + +JSObjectRef JSContextGetGlobalObject(JSContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toRef(jsCast(exec->lexicalGlobalObject()->methodTable()->toThis(exec->lexicalGlobalObject(), exec, NotStrictMode))); +} + +JSContextGroupRef JSContextGetGroup(JSContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + return toRef(&exec->vm()); +} + +JSGlobalContextRef JSContextGetGlobalContext(JSContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toGlobalRef(exec->lexicalGlobalObject()->globalExec()); +} + +JSStringRef JSGlobalContextCopyName(JSGlobalContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + String name = exec->vmEntryGlobalObject()->name(); + if (name.isNull()) + return 0; + + return OpaqueJSString::create(name).leakRef(); +} + +void JSGlobalContextSetName(JSGlobalContextRef ctx, JSStringRef name) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + exec->vmEntryGlobalObject()->setName(name ? name->string() : String()); +} + + +class BacktraceFunctor { +public: + BacktraceFunctor(StringBuilder& builder, unsigned remainingCapacityForFrameCapture) + : m_builder(builder) + , m_remainingCapacityForFrameCapture(remainingCapacityForFrameCapture) + { + } + + StackVisitor::Status operator()(StackVisitor& visitor) const + { + if (m_remainingCapacityForFrameCapture) { + // If callee is unknown, but we've not added any frame yet, we should + // still add the frame, because something called us, and gave us arguments. 
+ JSCell* callee = visitor->callee(); + if (!callee && visitor->index()) + return StackVisitor::Done; + + StringBuilder& builder = m_builder; + if (!builder.isEmpty()) + builder.append('\n'); + builder.append('#'); + builder.appendNumber(visitor->index()); + builder.append(' '); + builder.append(visitor->functionName()); + builder.appendLiteral("() at "); + builder.append(visitor->sourceURL()); + if (visitor->hasLineAndColumnInfo()) { + builder.append(':'); + unsigned lineNumber; + unsigned unusedColumn; + visitor->computeLineAndColumn(lineNumber, unusedColumn); + builder.appendNumber(lineNumber); + } + + if (!callee) + return StackVisitor::Done; + + m_remainingCapacityForFrameCapture--; + return StackVisitor::Continue; + } + return StackVisitor::Done; + } + +private: + StringBuilder& m_builder; + mutable unsigned m_remainingCapacityForFrameCapture; +}; + +JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + StringBuilder builder; + CallFrame* frame = exec->vm().topCallFrame; + + ASSERT(maxStackSize); + BacktraceFunctor functor(builder, maxStackSize); + frame->iterate(functor); + + return OpaqueJSString::create(builder.toString()).leakRef(); +} + +bool JSGlobalContextGetRemoteInspectionEnabled(JSGlobalContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + return exec->vmEntryGlobalObject()->remoteDebuggingEnabled(); +} + +void JSGlobalContextSetRemoteInspectionEnabled(JSGlobalContextRef ctx, bool enabled) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + exec->vmEntryGlobalObject()->setRemoteDebuggingEnabled(enabled); +} + +bool JSGlobalContextGetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + JSGlobalObject* globalObject = exec->vmEntryGlobalObject(); + return globalObject->inspectorController().includesNativeCallStackWhenReportingExceptions(); +#else + UNUSED_PARAM(ctx); + return false; +#endif +} + +void JSGlobalContextSetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx, bool includesNativeCallStack) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + JSGlobalObject* globalObject = exec->vmEntryGlobalObject(); + globalObject->inspectorController().setIncludesNativeCallStackWhenReportingExceptions(includesNativeCallStack); +#else + UNUSED_PARAM(ctx); + UNUSED_PARAM(includesNativeCallStack); +#endif +} + +#if USE(CF) +CFRunLoopRef JSGlobalContextGetDebuggerRunLoop(JSGlobalContextRef ctx) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return nullptr; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + return exec->vmEntryGlobalObject()->inspectorDebuggable().targetRunLoop(); +#else + UNUSED_PARAM(ctx); + return nullptr; +#endif +} + +void JSGlobalContextSetDebuggerRunLoop(JSGlobalContextRef ctx, CFRunLoopRef runLoop) +{ +#if ENABLE(REMOTE_INSPECTOR) + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + exec->vmEntryGlobalObject()->inspectorDebuggable().setTargetRunLoop(runLoop); +#else + UNUSED_PARAM(ctx); 
+ UNUSED_PARAM(runLoop); +#endif +} +#endif // USE(CF) + +#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS) +Inspector::AugmentableInspectorController* JSGlobalContextGetAugmentableInspectorController(JSGlobalContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return nullptr; + } + + ExecState* exec = toJS(ctx); + JSLockHolder lock(exec); + + return &exec->vmEntryGlobalObject()->inspectorController(); +} +#endif diff --git a/API/JSContextRef.h b/API/JSContextRef.h new file mode 100644 index 0000000..0c800bc --- /dev/null +++ b/API/JSContextRef.h @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextRef_h +#define JSContextRef_h + +#include +#include +#include + +#ifndef __cplusplus +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*! +@function +@abstract Creates a JavaScript context group. +@discussion A JSContextGroup associates JavaScript contexts with one another. + Contexts in the same group may share and exchange JavaScript objects. Sharing and/or exchanging + JavaScript objects between contexts in different groups will produce undefined behavior. + When objects from the same context group are used in multiple threads, explicit + synchronization is required. +@result The created JSContextGroup. +*/ +JS_EXPORT JSContextGroupRef JSContextGroupCreate(void) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Retains a JavaScript context group. +@param group The JSContextGroup to retain. +@result A JSContextGroup that is the same as group. +*/ +JS_EXPORT JSContextGroupRef JSContextGroupRetain(JSContextGroupRef group) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Releases a JavaScript context group. +@param group The JSContextGroup to release. +*/ +JS_EXPORT void JSContextGroupRelease(JSContextGroupRef group) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Creates a global JavaScript execution context. +@discussion JSGlobalContextCreate allocates a global object and populates it with all the + built-in JavaScript objects, such as Object, Function, String, and Array. + + In WebKit version 4.0 and later, the context is created in a unique context group. + Therefore, scripts may execute in it concurrently with scripts executing in other contexts. 
+ However, you may not use values created in the context in other contexts. +@param globalObjectClass The class to use when creating the global object. Pass + NULL to use the default object class. +@result A JSGlobalContext with a global object of class globalObjectClass. +*/ +JS_EXPORT JSGlobalContextRef JSGlobalContextCreate(JSClassRef globalObjectClass) CF_AVAILABLE(10_5, 7_0); + +/*! +@function +@abstract Creates a global JavaScript execution context in the context group provided. +@discussion JSGlobalContextCreateInGroup allocates a global object and populates it with + all the built-in JavaScript objects, such as Object, Function, String, and Array. +@param globalObjectClass The class to use when creating the global object. Pass + NULL to use the default object class. +@param group The context group to use. The created global context retains the group. + Pass NULL to create a unique group for the context. +@result A JSGlobalContext with a global object of class globalObjectClass and a context + group equal to group. +*/ +JS_EXPORT JSGlobalContextRef JSGlobalContextCreateInGroup(JSContextGroupRef group, JSClassRef globalObjectClass) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Retains a global JavaScript execution context. +@param ctx The JSGlobalContext to retain. +@result A JSGlobalContext that is the same as ctx. +*/ +JS_EXPORT JSGlobalContextRef JSGlobalContextRetain(JSGlobalContextRef ctx); + +/*! +@function +@abstract Releases a global JavaScript execution context. +@param ctx The JSGlobalContext to release. +*/ +JS_EXPORT void JSGlobalContextRelease(JSGlobalContextRef ctx); + +/*! +@function +@abstract Gets the global object of a JavaScript execution context. +@param ctx The JSContext whose global object you want to get. +@result ctx's global object. +*/ +JS_EXPORT JSObjectRef JSContextGetGlobalObject(JSContextRef ctx); + +/*! +@function +@abstract Gets the context group to which a JavaScript execution context belongs. +@param ctx The JSContext whose group you want to get. +@result ctx's group. +*/ +JS_EXPORT JSContextGroupRef JSContextGetGroup(JSContextRef ctx) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Gets the global context of a JavaScript execution context. +@param ctx The JSContext whose global context you want to get. +@result ctx's global context. +*/ +JS_EXPORT JSGlobalContextRef JSContextGetGlobalContext(JSContextRef ctx) CF_AVAILABLE(10_7, 7_0); + +/*! +@function +@abstract Gets a copy of the name of a context. +@param ctx The JSGlobalContext whose name you want to get. +@result The name for ctx. +@discussion A JSGlobalContext's name is exposed for remote debugging to make it +easier to identify the context you would like to attach to. +*/ +JS_EXPORT JSStringRef JSGlobalContextCopyName(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0); + +/*! +@function +@abstract Sets the remote debugging name for a context. +@param ctx The JSGlobalContext that you want to name. +@param name The remote debugging name to set on ctx. +*/ +JS_EXPORT void JSGlobalContextSetName(JSGlobalContextRef ctx, JSStringRef name) CF_AVAILABLE(10_10, 8_0); + +#ifdef __cplusplus +} +#endif + +#endif /* JSContextRef_h */ diff --git a/API/JSContextRefInspectorSupport.h b/API/JSContextRefInspectorSupport.h new file mode 100644 index 0000000..a09d828 --- /dev/null +++ b/API/JSContextRefInspectorSupport.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. 
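Tying the JSContextRef.h functions documented above together, a minimal sketch (illustrative only) that creates a context, gives it a remote-debugging name, and balances the create with a release:

    JSGlobalContextRef ctx = JSGlobalContextCreate(NULL); // default global object class
    JSStringRef contextName = JSStringCreateWithUTF8CString("My Context");
    JSGlobalContextSetName(ctx, contextName);
    JSStringRelease(contextName);
    // ... evaluate scripts against ctx ...
    JSGlobalContextRelease(ctx);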
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextRefInspectorSupport_h +#define JSContextRefInspectorSupport_h + +#ifndef __cplusplus +#error Requires C++ Support. +#endif + +#include + +namespace Inspector { +class AugmentableInspectorController; +} + +extern "C" { +JS_EXPORT Inspector::AugmentableInspectorController* JSGlobalContextGetAugmentableInspectorController(JSGlobalContextRef); +} + +#endif // JSContextRefInspectorSupport_h diff --git a/API/JSContextRefInternal.h b/API/JSContextRefInternal.h new file mode 100644 index 0000000..79d7eb6 --- /dev/null +++ b/API/JSContextRefInternal.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextRefInternal_h +#define JSContextRefInternal_h + +#include "JSContextRefPrivate.h" + +#if USE(CF) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if USE(CF) +/*! +@function +@abstract Gets the run loop used by the Web Inspector debugger when evaluating JavaScript in this context. 
+@param ctx The JSGlobalContext whose setting you want to get. +*/ +JS_EXPORT CFRunLoopRef JSGlobalContextGetDebuggerRunLoop(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0); + +/*! +@function +@abstract Sets the run loop used by the Web Inspector debugger when evaluating JavaScript in this context. +@param ctx The JSGlobalContext that you want to change. +@param runLoop The new value of the setting for the context. +*/ +JS_EXPORT void JSGlobalContextSetDebuggerRunLoop(JSGlobalContextRef ctx, CFRunLoopRef runLoop) CF_AVAILABLE(10_10, 8_0); +#endif + +#ifdef __cplusplus +} +#endif + +#endif // JSContextRefInternal_h diff --git a/API/JSContextRefPrivate.h b/API/JSContextRefPrivate.h new file mode 100644 index 0000000..19604ea --- /dev/null +++ b/API/JSContextRefPrivate.h @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2009 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSContextRefPrivate_h +#define JSContextRefPrivate_h + +#include +#include +#include + +#ifndef __cplusplus +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*! +@function +@abstract Gets a Backtrace for the existing context +@param ctx The JSContext whose backtrace you want to get +@result A string containing the backtrace +*/ +JS_EXPORT JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) CF_AVAILABLE(10_6, 7_0); + + +/*! +@typedef JSShouldTerminateCallback +@abstract The callback invoked when script execution has exceeded the allowed + time limit previously specified via JSContextGroupSetExecutionTimeLimit. +@param ctx The execution context to use. +@param context User specified context data previously passed to + JSContextGroupSetExecutionTimeLimit. +@discussion If you named your function Callback, you would declare it like this: + + bool Callback(JSContextRef ctx, void* context); + + If you return true, the timed out script will terminate. + If you return false, the script will run for another period of the allowed + time limit specified via JSContextGroupSetExecutionTimeLimit. + + Within this callback function, you may call JSContextGroupSetExecutionTimeLimit + to set a new time limit, or JSContextGroupClearExecutionTimeLimit to cancel the + timeout. 
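As a minimal sketch of the watchdog mechanism described above (illustrative only; it assumes an existing JSContextGroupRef named group), a callback that unconditionally terminates a timed-out script can be installed with a one-second limit:

    static bool shouldTerminate(JSContextRef ctx, void* context)
    {
        return true; // returning true terminates the timed-out script
    }

    JSContextGroupSetExecutionTimeLimit(group, 1.0, shouldTerminate, NULL);

Returning false instead lets the script run for another period of the configured limit, as the discussion notes.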
+*/ +typedef bool +(*JSShouldTerminateCallback) (JSContextRef ctx, void* context); + +/*! +@function +@abstract Sets the script execution time limit. +@param group The JavaScript context group that this time limit applies to. +@param limit The time limit of allowed script execution time in seconds. +@param callback The callback function that will be invoked when the time limit + has been reached. This will give you a chance to decide if you want to + terminate the script or not. If you pass a NULL callback, the script will be + terminated unconditionally when the time limit has been reached. +@param context User data that you can provide to be passed back to you + in your callback. + + In order to guarantee that the execution time limit will take effect, you will + need to call JSContextGroupSetExecutionTimeLimit before you start executing + any scripts. +*/ +JS_EXPORT void JSContextGroupSetExecutionTimeLimit(JSContextGroupRef group, double limit, JSShouldTerminateCallback callback, void* context) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Clears the script execution time limit. +@param group The JavaScript context group that the time limit is cleared on. +*/ +JS_EXPORT void JSContextGroupClearExecutionTimeLimit(JSContextGroupRef group) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Gets a whether or not remote inspection is enabled on the context. +@param ctx The JSGlobalContext whose setting you want to get. +@result The value of the setting, true if remote inspection is enabled, otherwise false. +@discussion Remote inspection is true by default. +*/ +JS_EXPORT bool JSGlobalContextGetRemoteInspectionEnabled(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0); + +/*! +@function +@abstract Sets the remote inspection setting for a context. +@param ctx The JSGlobalContext that you want to change. +@param enabled The new remote inspection enabled setting for the context. +*/ +JS_EXPORT void JSGlobalContextSetRemoteInspectionEnabled(JSGlobalContextRef ctx, bool enabled) CF_AVAILABLE(10_10, 8_0); + +/*! +@function +@abstract Gets the include native call stack when reporting exceptions setting for a context. +@param ctx The JSGlobalContext whose setting you want to get. +@result The value of the setting, true if remote inspection is enabled, otherwise false. +@discussion This setting is true by default. +*/ +JS_EXPORT bool JSGlobalContextGetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx) CF_AVAILABLE(10_10, 8_0); + +/*! +@function +@abstract Sets the include native call stack when reporting exceptions setting for a context. +@param ctx The JSGlobalContext that you want to change. +@param includesNativeCallStack The new value of the setting for the context. +*/ +JS_EXPORT void JSGlobalContextSetIncludesNativeCallStackWhenReportingExceptions(JSGlobalContextRef ctx, bool includesNativeCallStack) CF_AVAILABLE(10_10, 8_0); + +#ifdef __cplusplus +} +#endif + +#endif /* JSContextRefPrivate_h */ diff --git a/API/JSExport.h b/API/JSExport.h new file mode 100644 index 0000000..b8a4849 --- /dev/null +++ b/API/JSExport.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import + +#if JSC_OBJC_API_ENABLED + +/*! +@protocol +@abstract JSExport provides a declarative way to export Objective-C objects and + classes -- including properties, instance methods, class methods, and + initializers -- to JavaScript. + +@discussion When an Objective-C object is exported to JavaScript, a JavaScript + wrapper object is created. + + In JavaScript, inheritance works via a chain of prototype objects. + For each Objective-C class in each JSContext, an object appropriate for use + as a prototype will be provided. For the class NSObject the prototype + will be the Object prototype. For all other Objective-C + classes a prototype will be created. The prototype for a given + Objective-C class will have its internal [Prototype] property set to point to + the prototype created for the Objective-C class's superclass. As such the + prototype chain for a JavaScript wrapper object will reflect the wrapped + Objective-C type's inheritance hierarchy. + + JavaScriptCore also produces a constructor for each Objective-C class. The + constructor has a property named 'prototype' that references the prototype, + and the prototype has a property named 'constructor' that references the + constructor. + + By default JavaScriptCore does not export any methods or properties from an + Objective-C class to JavaScript; however methods and properties may be exported + explicitly using JSExport. For each protocol that a class conforms to, if the + protocol incorporates the protocol JSExport, JavaScriptCore exports the methods + and properties in that protocol to JavaScript + + For each exported instance method JavaScriptCore will assign a corresponding + JavaScript function to the prototype. For each exported Objective-C property + JavaScriptCore will assign a corresponding JavaScript accessor to the prototype. + For each exported class method JavaScriptCore will assign a corresponding + JavaScript function to the constructor. For example: + +
+@textblock
+    @protocol MyClassJavaScriptMethods <JSExport>
+    - (void)foo;
+    @end
+
+    @interface MyClass : NSObject <MyClassJavaScriptMethods>
+    - (void)foo;
+    - (void)bar;
+    @end
+@/textblock
+
+ + Data properties that are created on the prototype or constructor objects have + the attributes: writable:true, enumerable:false, configurable:true. + Accessor properties have the attributes: enumerable:false and configurable:true. + + If an instance of MyClass is converted to a JavaScript value, the resulting + wrapper object will (via its prototype) export the method foo to JavaScript, + since the class conforms to the MyClassJavaScriptMethods protocol, and this + protocol incorporates JSExport. bar will not be exported. + + JSExport supports properties, arguments, and return values of the following types: + + Primitive numbers: signed values up to 32-bits convert using JSValue's + valueWithInt32/toInt32. Unsigned values up to 32-bits convert using JSValue's + valueWithUInt32/toUInt32. All other numeric values convert using JSValue's + valueWithDouble/toDouble. + + BOOL: values convert using JSValue's valueWithBool/toBool. + + id: values convert using JSValue's valueWithObject/toObject. + + Objective-C instance pointers: Pointers convert using JSValue's + valueWithObjectOfClass/toObject. + + C structs: C structs for CGPoint, NSRange, CGRect, and CGSize convert using + JSValue's appropriate methods. Other C structs are not supported. + + Blocks: Blocks convert using JSValue's valueWithObject/toObject. + + All objects that conform to JSExport convert to JavaScript wrapper objects, + even if they subclass classes that would otherwise behave differently. For + example, if a subclass of NSString conforms to JSExport, it converts to + JavaScript as a wrapper object rather than a JavaScript string. +*/ +@protocol JSExport +@end + +/*! +@define +@abstract Rename a selector when it's exported to JavaScript. +@discussion When a selector that takes one or more arguments is converted to a JavaScript + property name, by default a property name will be generated by performing the + following conversion: + + - All colons are removed from the selector + + - Any lowercase letter that had followed a colon will be capitalized. + + Under the default conversion a selector doFoo:withBar: will be exported as + doFooWithBar. The default conversion may be overriden using the JSExportAs + macro, for example to export a method doFoo:withBar: as doFoo: + +
+@textblock
+    @protocol MyClassJavaScriptMethods <JSExport>
+    JSExportAs(doFoo,
+    - (void)doFoo:(id)foo withBar:(id)bar
+    );
+    @end
+@/textblock
+
+ + Note that the JSExport macro may only be applied to a selector that takes one + or more argument. +*/ +#define JSExportAs(PropertyName, Selector) \ + @optional Selector __JS_EXPORT_AS__##PropertyName:(id)argument; @required Selector + +#endif diff --git a/API/JSManagedValue.h b/API/JSManagedValue.h new file mode 100644 index 0000000..01073fa --- /dev/null +++ b/API/JSManagedValue.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSManagedValue_h +#define JSManagedValue_h + +#import +#import + +#if JSC_OBJC_API_ENABLED + +@class JSValue; +@class JSContext; + +/*! +@interface +@discussion JSManagedValue represents a "conditionally retained" JSValue. + "Conditionally retained" means that as long as the JSManagedValue's + JSValue is reachable through the JavaScript object graph, + or through the Objective-C object graph reported to the JSVirtualMachine using + addManagedReference:withOwner:, the corresponding JSValue will + be retained. However, if neither graph reaches the JSManagedValue, the + corresponding JSValue will be released and set to nil. + +The primary use for a JSManagedValue is to store a JSValue in an Objective-C +or Swift object that is exported to JavaScript. It is incorrect to store a JSValue +in an object that is exported to JavaScript, since doing so creates a retain cycle. +*/ +NS_CLASS_AVAILABLE(10_9, 7_0) +@interface JSManagedValue : NSObject + +/*! +@method +@abstract Create a JSManagedValue from a JSValue. +@result The new JSManagedValue. +*/ ++ (JSManagedValue *)managedValueWithValue:(JSValue *)value; ++ (JSManagedValue *)managedValueWithValue:(JSValue *)value andOwner:(id)owner NS_AVAILABLE(10_10, 8_0); + +/*! +@method +@abstract Create a JSManagedValue. +@result The new JSManagedValue. +*/ +- (instancetype)initWithValue:(JSValue *)value; + +/*! +@property +@abstract Get the JSValue from the JSManagedValue. +@result The corresponding JSValue for this JSManagedValue or + nil if the JSValue has been collected. 
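A minimal sketch of the retain-cycle-avoidance pattern described in the JSManagedValue discussion above (illustrative; the _callback instance variable and the exporting class are assumed, not part of the original header):

    // In an Objective-C class whose instances are exposed to JavaScript.
    - (void)setCallback:(JSValue *)callback
    {
        // Wrapping the value avoids retaining the JSValue (and its context) directly.
        _callback = [JSManagedValue managedValueWithValue:callback andOwner:self];
    }

    - (void)invokeCallback
    {
        JSValue *callback = _callback.value; // nil once the value has been collected
        [callback callWithArguments:@[]];
    }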
+*/ +@property (readonly, strong) JSValue *value; + +@end + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSManagedValue_h diff --git a/API/JSManagedValue.mm b/API/JSManagedValue.mm new file mode 100644 index 0000000..038a682 --- /dev/null +++ b/API/JSManagedValue.mm @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2013, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#import "config.h" +#import "JSManagedValue.h" + +#if JSC_OBJC_API_ENABLED + +#import "APICast.h" +#import "Heap.h" +#import "JSContextInternal.h" +#import "JSValueInternal.h" +#import "Weak.h" +#import "WeakHandleOwner.h" +#import "ObjcRuntimeExtras.h" +#import "JSCInlines.h" +#import +#import + +class JSManagedValueHandleOwner : public JSC::WeakHandleOwner { +public: + void finalize(JSC::Handle, void* context) override; + bool isReachableFromOpaqueRoots(JSC::Handle, void* context, JSC::SlotVisitor&) override; +}; + +static JSManagedValueHandleOwner* managedValueHandleOwner() +{ + static NeverDestroyed jsManagedValueHandleOwner; + return &jsManagedValueHandleOwner.get(); +} + +class WeakValueRef { +public: + WeakValueRef() + : m_tag(NotSet) + { + } + + ~WeakValueRef() + { + clear(); + } + + void clear() + { + switch (m_tag) { + case NotSet: + return; + case Primitive: + u.m_primitive = JSC::JSValue(); + return; + case Object: + u.m_object.clear(); + return; + case String: + u.m_string.clear(); + return; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + bool isClear() const + { + switch (m_tag) { + case NotSet: + return true; + case Primitive: + return !u.m_primitive; + case Object: + return !u.m_object; + case String: + return !u.m_string; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + bool isSet() const { return m_tag != NotSet; } + bool isPrimitive() const { return m_tag == Primitive; } + bool isObject() const { return m_tag == Object; } + bool isString() const { return m_tag == String; } + + void setPrimitive(JSC::JSValue primitive) + { + ASSERT(!isSet()); + ASSERT(!u.m_primitive); + ASSERT(primitive.isPrimitive()); + m_tag = Primitive; + u.m_primitive = primitive; + } + + void setObject(JSC::JSObject* object, void* context) + { + ASSERT(!isSet()); + ASSERT(!u.m_object); + m_tag = Object; + JSC::Weak weak(object, managedValueHandleOwner(), context); + 
u.m_object.swap(weak); + } + + void setString(JSC::JSString* string, void* context) + { + ASSERT(!isSet()); + ASSERT(!u.m_object); + m_tag = String; + JSC::Weak weak(string, managedValueHandleOwner(), context); + u.m_string.swap(weak); + } + + JSC::JSObject* object() + { + ASSERT(isObject()); + return u.m_object.get(); + } + + JSC::JSValue primitive() + { + ASSERT(isPrimitive()); + return u.m_primitive; + } + + JSC::JSString* string() + { + ASSERT(isString()); + return u.m_string.get(); + } + +private: + enum WeakTypeTag { NotSet, Primitive, Object, String }; + WeakTypeTag m_tag; + union WeakValueUnion { + public: + WeakValueUnion () + : m_primitive(JSC::JSValue()) + { + } + + ~WeakValueUnion() + { + ASSERT(!m_primitive); + } + + JSC::JSValue m_primitive; + JSC::Weak m_object; + JSC::Weak m_string; + } u; +}; + +@implementation JSManagedValue { + JSC::Weak m_globalObject; + RefPtr m_lock; + WeakValueRef m_weakValue; + NSMapTable *m_owners; +} + ++ (JSManagedValue *)managedValueWithValue:(JSValue *)value +{ + return [[[self alloc] initWithValue:value] autorelease]; +} + ++ (JSManagedValue *)managedValueWithValue:(JSValue *)value andOwner:(id)owner +{ + JSManagedValue *managedValue = [[self alloc] initWithValue:value]; + [value.context.virtualMachine addManagedReference:managedValue withOwner:owner]; + return [managedValue autorelease]; +} + +- (instancetype)init +{ + return [self initWithValue:nil]; +} + +- (instancetype)initWithValue:(JSValue *)value +{ + self = [super init]; + if (!self) + return nil; + + if (!value) + return self; + + JSC::ExecState* exec = toJS([value.context JSGlobalContextRef]); + JSC::JSGlobalObject* globalObject = exec->lexicalGlobalObject(); + JSC::Weak weak(globalObject, managedValueHandleOwner(), self); + m_globalObject.swap(weak); + + m_lock = &exec->vm().apiLock(); + + NSPointerFunctionsOptions weakIDOptions = NSPointerFunctionsWeakMemory | NSPointerFunctionsObjectPersonality; + NSPointerFunctionsOptions integerOptions = NSPointerFunctionsOpaqueMemory | NSPointerFunctionsIntegerPersonality; + m_owners = [[NSMapTable alloc] initWithKeyOptions:weakIDOptions valueOptions:integerOptions capacity:1]; + + JSC::JSValue jsValue = toJS(exec, [value JSValueRef]); + if (jsValue.isObject()) + m_weakValue.setObject(JSC::jsCast(jsValue.asCell()), self); + else if (jsValue.isString()) + m_weakValue.setString(JSC::jsCast(jsValue.asCell()), self); + else + m_weakValue.setPrimitive(jsValue); + return self; +} + +- (void)dealloc +{ + JSVirtualMachine *virtualMachine = [[[self value] context] virtualMachine]; + if (virtualMachine) { + NSMapTable *copy = [m_owners copy]; + for (id owner in [copy keyEnumerator]) { + size_t count = reinterpret_cast(NSMapGet(m_owners, owner)); + while (count--) + [virtualMachine removeManagedReference:self withOwner:owner]; + } + [copy release]; + } + + [self disconnectValue]; + [m_owners release]; + [super dealloc]; +} + +- (void)didAddOwner:(id)owner +{ + size_t count = reinterpret_cast(NSMapGet(m_owners, owner)); + NSMapInsert(m_owners, owner, reinterpret_cast(count + 1)); +} + +- (void)didRemoveOwner:(id)owner +{ + size_t count = reinterpret_cast(NSMapGet(m_owners, owner)); + + if (!count) + return; + + if (count == 1) { + NSMapRemove(m_owners, owner); + return; + } + + NSMapInsert(m_owners, owner, reinterpret_cast(count - 1)); +} + +- (JSValue *)value +{ + WTF::Locker locker(m_lock.get()); + if (!m_lock->vm()) + return nil; + + JSC::JSLockHolder apiLocker(m_lock->vm()); + if (!m_globalObject) + return nil; + if (m_weakValue.isClear()) + return 
nil; + JSC::ExecState* exec = m_globalObject->globalExec(); + JSContext *context = [JSContext contextWithJSGlobalContextRef:toGlobalRef(exec)]; + JSC::JSValue value; + if (m_weakValue.isPrimitive()) + value = m_weakValue.primitive(); + else if (m_weakValue.isString()) + value = m_weakValue.string(); + else + value = m_weakValue.object(); + return [JSValue valueWithJSValueRef:toRef(exec, value) inContext:context]; +} + +- (void)disconnectValue +{ + m_globalObject.clear(); + m_weakValue.clear(); +} + +@end + +@interface JSManagedValue (PrivateMethods) +- (void)disconnectValue; +@end + +bool JSManagedValueHandleOwner::isReachableFromOpaqueRoots(JSC::Handle, void* context, JSC::SlotVisitor& visitor) +{ + JSManagedValue *managedValue = static_cast(context); + return visitor.containsOpaqueRoot(managedValue); +} + +void JSManagedValueHandleOwner::finalize(JSC::Handle, void* context) +{ + JSManagedValue *managedValue = static_cast(context); + [managedValue disconnectValue]; +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/JSManagedValueInternal.h b/API/JSManagedValueInternal.h new file mode 100644 index 0000000..2443fe5 --- /dev/null +++ b/API/JSManagedValueInternal.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSManagedValueInternal_h +#define JSManagedValueInternal_h + +#import + +#if JSC_OBJC_API_ENABLED + +@interface JSManagedValue(Internal) + +- (void)didAddOwner:(id)owner; +- (void)didRemoveOwner:(id)owner; + +@end + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSManagedValueInternal_h diff --git a/API/JSObjectRef.cpp b/API/JSObjectRef.cpp new file mode 100644 index 0000000..87e7c77 --- /dev/null +++ b/API/JSObjectRef.cpp @@ -0,0 +1,669 @@ +/* + * Copyright (C) 2006, 2007, 2008, 2016 Apple Inc. All rights reserved. + * Copyright (C) 2008 Kelvin W Sherlock (ksherlock@gmail.com) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSObjectRef.h" +#include "JSObjectRefPrivate.h" + +#include "APICast.h" +#include "APIUtils.h" +#include "DateConstructor.h" +#include "ErrorConstructor.h" +#include "Exception.h" +#include "FunctionConstructor.h" +#include "Identifier.h" +#include "InitializeThreading.h" +#include "JSAPIWrapperObject.h" +#include "JSArray.h" +#include "JSCInlines.h" +#include "JSCallbackConstructor.h" +#include "JSCallbackFunction.h" +#include "JSCallbackObject.h" +#include "JSClassRef.h" +#include "JSFunction.h" +#include "JSGlobalObject.h" +#include "JSObject.h" +#include "JSRetainPtr.h" +#include "JSString.h" +#include "JSValueRef.h" +#include "ObjectConstructor.h" +#include "ObjectPrototype.h" +#include "PropertyNameArray.h" +#include "RegExpConstructor.h" + +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectInspectorController.h" +#endif + +using namespace JSC; + +JSClassRef JSClassCreate(const JSClassDefinition* definition) +{ + initializeThreading(); + auto jsClass = (definition->attributes & kJSClassAttributeNoAutomaticPrototype) + ? OpaqueJSClass::createNoAutomaticPrototype(definition) + : OpaqueJSClass::create(definition); + + return &jsClass.leakRef(); +} + +JSClassRef JSClassRetain(JSClassRef jsClass) +{ + jsClass->ref(); + return jsClass; +} + +void JSClassRelease(JSClassRef jsClass) +{ + jsClass->deref(); +} + +JSObjectRef JSObjectMake(JSContextRef ctx, JSClassRef jsClass, void* data) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (!jsClass) + return toRef(constructEmptyObject(exec)); + + JSCallbackObject* object = JSCallbackObject::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackObjectStructure(), jsClass, data); + if (JSObject* prototype = jsClass->prototype(exec)) + object->setPrototypeDirect(exec->vm(), prototype); + + return toRef(object); +} + +JSObjectRef JSObjectMakeFunctionWithCallback(JSContextRef ctx, JSStringRef name, JSObjectCallAsFunctionCallback callAsFunction) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + return toRef(JSCallbackFunction::create(exec->vm(), exec->lexicalGlobalObject(), callAsFunction, name ? 
name->string() : ASCIILiteral("anonymous"))); +} + +JSObjectRef JSObjectMakeConstructor(JSContextRef ctx, JSClassRef jsClass, JSObjectCallAsConstructorCallback callAsConstructor) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsPrototype = jsClass ? jsClass->prototype(exec) : 0; + if (!jsPrototype) + jsPrototype = exec->lexicalGlobalObject()->objectPrototype(); + + JSCallbackConstructor* constructor = JSCallbackConstructor::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackConstructorStructure(), jsClass, callAsConstructor); + constructor->putDirect(exec->vm(), exec->propertyNames().prototype, jsPrototype, DontEnum | DontDelete | ReadOnly); + return toRef(constructor); +} + +JSObjectRef JSObjectMakeFunction(JSContextRef ctx, JSStringRef name, unsigned parameterCount, const JSStringRef parameterNames[], JSStringRef body, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + startingLineNumber = std::max(1, startingLineNumber); + Identifier nameID = name ? name->identifier(&exec->vm()) : Identifier::fromString(exec, "anonymous"); + + MarkedArgumentBuffer args; + for (unsigned i = 0; i < parameterCount; i++) + args.append(jsString(exec, parameterNames[i]->string())); + args.append(jsString(exec, body->string())); + + JSObject* result = constructFunction(exec, exec->lexicalGlobalObject(), args, nameID, sourceURL ? sourceURL->string() : String(), TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber())); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + return toRef(result); +} + +JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* result; + if (argumentCount) { + MarkedArgumentBuffer argList; + for (size_t i = 0; i < argumentCount; ++i) + argList.append(toJS(exec, arguments[i])); + + result = constructArray(exec, static_cast(0), argList); + } else + result = constructEmptyArray(exec, 0); + + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + + return toRef(result); +} + +JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + MarkedArgumentBuffer argList; + for (size_t i = 0; i < argumentCount; ++i) + argList.append(toJS(exec, arguments[i])); + + JSObject* result = constructDate(exec, exec->lexicalGlobalObject(), JSValue(), argList); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + + return toRef(result); +} + +JSObjectRef JSObjectMakeError(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue message = argumentCount ? 
toJS(exec, arguments[0]) : jsUndefined(); + Structure* errorStructure = exec->lexicalGlobalObject()->errorStructure(); + JSObject* result = ErrorInstance::create(exec, errorStructure, message); + + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + + return toRef(result); +} + +JSObjectRef JSObjectMakeRegExp(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + MarkedArgumentBuffer argList; + for (size_t i = 0; i < argumentCount; ++i) + argList.append(toJS(exec, arguments[i])); + + JSObject* result = constructRegExp(exec, exec->lexicalGlobalObject(), argList); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + + return toRef(result); +} + +JSValueRef JSObjectGetPrototype(JSContextRef ctx, JSObjectRef object) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + return toRef(exec, jsObject->getPrototypeDirect()); +} + +void JSObjectSetPrototype(JSContextRef ctx, JSObjectRef object, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + JSValue jsValue = toJS(exec, value); + + if (JSProxy* proxy = jsDynamicCast(jsObject)) { + if (JSGlobalObject* globalObject = jsDynamicCast(proxy->target())) { + globalObject->resetPrototype(exec->vm(), jsValue.isObject() ? jsValue : jsNull()); + return; + } + // Someday we might use proxies for something other than JSGlobalObjects, but today is not that day. + RELEASE_ASSERT_NOT_REACHED(); + } + jsObject->setPrototype(exec->vm(), exec, jsValue.isObject() ? 
jsValue : jsNull()); +} + +bool JSObjectHasProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + + return jsObject->hasProperty(exec, propertyName->identifier(&exec->vm())); +} + +JSValueRef JSObjectGetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + + JSValue jsValue = jsObject->get(exec, propertyName->identifier(&exec->vm())); + handleExceptionIfNeeded(exec, exception); + return toRef(exec, jsValue); +} + +void JSObjectSetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSPropertyAttributes attributes, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + VM& vm = exec->vm(); + JSLockHolder locker(vm); + auto scope = DECLARE_CATCH_SCOPE(vm); + + JSObject* jsObject = toJS(object); + Identifier name(propertyName->identifier(&exec->vm())); + JSValue jsValue = toJS(exec, value); + + bool doesNotHaveProperty = attributes && !jsObject->hasProperty(exec, name); + if (LIKELY(!scope.exception())) { + if (doesNotHaveProperty) { + PropertyDescriptor desc(jsValue, attributes); + jsObject->methodTable()->defineOwnProperty(jsObject, exec, name, desc, false); + } else { + PutPropertySlot slot(jsObject); + jsObject->methodTable()->put(jsObject, exec, name, jsValue, slot); + } + } + handleExceptionIfNeeded(exec, exception); +} + +JSValueRef JSObjectGetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsigned propertyIndex, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + + JSValue jsValue = jsObject->get(exec, propertyIndex); + handleExceptionIfNeeded(exec, exception); + return toRef(exec, jsValue); +} + + +void JSObjectSetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsigned propertyIndex, JSValueRef value, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + JSValue jsValue = toJS(exec, value); + + jsObject->methodTable()->putByIndex(jsObject, exec, propertyIndex, jsValue, false); + handleExceptionIfNeeded(exec, exception); +} + +bool JSObjectDeleteProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSObject* jsObject = toJS(object); + + bool result = jsObject->methodTable()->deleteProperty(jsObject, exec, propertyName->identifier(&exec->vm())); + handleExceptionIfNeeded(exec, exception); + return result; +} + +// API objects have private properties, which may get accessed during destruction. This +// helper lets us get the ClassInfo of an API object from a function that may get called +// during destruction. 
+static const ClassInfo* classInfoPrivate(JSObject* jsObject) +{ + VM* vm = jsObject->vm(); + + if (vm->currentlyDestructingCallbackObject != jsObject) + return jsObject->classInfo(); + + return vm->currentlyDestructingCallbackObjectClassInfo; +} + +void* JSObjectGetPrivate(JSObjectRef object) +{ + JSObject* jsObject = uncheckedToJS(object); + + const ClassInfo* classInfo = classInfoPrivate(jsObject); + + // Get wrapped object if proxied + if (classInfo->isSubClassOf(JSProxy::info())) { + jsObject = static_cast(jsObject)->target(); + classInfo = jsObject->classInfo(); + } + + if (classInfo->isSubClassOf(JSCallbackObject::info())) + return static_cast*>(jsObject)->getPrivate(); + if (classInfo->isSubClassOf(JSCallbackObject::info())) + return static_cast*>(jsObject)->getPrivate(); +#if JSC_OBJC_API_ENABLED + if (classInfo->isSubClassOf(JSCallbackObject::info())) + return static_cast*>(jsObject)->getPrivate(); +#endif + + return 0; +} + +bool JSObjectSetPrivate(JSObjectRef object, void* data) +{ + JSObject* jsObject = uncheckedToJS(object); + + const ClassInfo* classInfo = classInfoPrivate(jsObject); + + // Get wrapped object if proxied + if (classInfo->isSubClassOf(JSProxy::info())) { + jsObject = static_cast(jsObject)->target(); + classInfo = jsObject->classInfo(); + } + + if (classInfo->isSubClassOf(JSCallbackObject::info())) { + static_cast*>(jsObject)->setPrivate(data); + return true; + } + if (classInfo->isSubClassOf(JSCallbackObject::info())) { + static_cast*>(jsObject)->setPrivate(data); + return true; + } +#if JSC_OBJC_API_ENABLED + if (classInfo->isSubClassOf(JSCallbackObject::info())) { + static_cast*>(jsObject)->setPrivate(data); + return true; + } +#endif + + return false; +} + +JSValueRef JSObjectGetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* jsObject = toJS(object); + JSValue result; + Identifier name(propertyName->identifier(&exec->vm())); + + // Get wrapped object if proxied + if (jsObject->inherits(JSProxy::info())) + jsObject = jsCast(jsObject)->target(); + + if (jsObject->inherits(JSCallbackObject::info())) + result = jsCast*>(jsObject)->getPrivateProperty(name); + else if (jsObject->inherits(JSCallbackObject::info())) + result = jsCast*>(jsObject)->getPrivateProperty(name); +#if JSC_OBJC_API_ENABLED + else if (jsObject->inherits(JSCallbackObject::info())) + result = jsCast*>(jsObject)->getPrivateProperty(name); +#endif + return toRef(exec, result); +} + +bool JSObjectSetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* jsObject = toJS(object); + JSValue jsValue = value ? 
toJS(exec, value) : JSValue(); + Identifier name(propertyName->identifier(&exec->vm())); + + // Get wrapped object if proxied + if (jsObject->inherits(JSProxy::info())) + jsObject = jsCast(jsObject)->target(); + + if (jsObject->inherits(JSCallbackObject::info())) { + jsCast*>(jsObject)->setPrivateProperty(exec->vm(), name, jsValue); + return true; + } + if (jsObject->inherits(JSCallbackObject::info())) { + jsCast*>(jsObject)->setPrivateProperty(exec->vm(), name, jsValue); + return true; + } +#if JSC_OBJC_API_ENABLED + if (jsObject->inherits(JSCallbackObject::info())) { + jsCast*>(jsObject)->setPrivateProperty(exec->vm(), name, jsValue); + return true; + } +#endif + return false; +} + +bool JSObjectDeletePrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* jsObject = toJS(object); + Identifier name(propertyName->identifier(&exec->vm())); + + // Get wrapped object if proxied + if (jsObject->inherits(JSProxy::info())) + jsObject = jsCast(jsObject)->target(); + + if (jsObject->inherits(JSCallbackObject::info())) { + jsCast*>(jsObject)->deletePrivateProperty(name); + return true; + } + if (jsObject->inherits(JSCallbackObject::info())) { + jsCast*>(jsObject)->deletePrivateProperty(name); + return true; + } +#if JSC_OBJC_API_ENABLED + if (jsObject->inherits(JSCallbackObject::info())) { + jsCast*>(jsObject)->deletePrivateProperty(name); + return true; + } +#endif + return false; +} + +bool JSObjectIsFunction(JSContextRef ctx, JSObjectRef object) +{ + if (!object) + return false; + JSLockHolder locker(toJS(ctx)); + CallData callData; + JSCell* cell = toJS(object); + return cell->methodTable()->getCallData(cell, callData) != CallType::None; +} + +JSValueRef JSObjectCallAsFunction(JSContextRef ctx, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (!object) + return 0; + + JSObject* jsObject = toJS(object); + JSObject* jsThisObject = toJS(thisObject); + + if (!jsThisObject) + jsThisObject = exec->globalThisValue(); + + MarkedArgumentBuffer argList; + for (size_t i = 0; i < argumentCount; i++) + argList.append(toJS(exec, arguments[i])); + + CallData callData; + CallType callType = jsObject->methodTable()->getCallData(jsObject, callData); + if (callType == CallType::None) + return 0; + + JSValueRef result = toRef(exec, profiledCall(exec, ProfilingReason::API, jsObject, callType, callData, jsThisObject, argList)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + return result; +} + +bool JSObjectIsConstructor(JSContextRef, JSObjectRef object) +{ + if (!object) + return false; + JSObject* jsObject = toJS(object); + ConstructData constructData; + return jsObject->methodTable()->getConstructData(jsObject, constructData) != ConstructType::None; +} + +JSObjectRef JSObjectCallAsConstructor(JSContextRef ctx, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (!object) + return 0; + + JSObject* jsObject = toJS(object); + + ConstructData constructData; + ConstructType constructType = jsObject->methodTable()->getConstructData(jsObject, constructData); + if (constructType == ConstructType::None) + return 0; + + MarkedArgumentBuffer argList; + for (size_t i = 0; i < argumentCount; i++) + argList.append(toJS(exec, 
arguments[i])); + + JSObjectRef result = toRef(profiledConstruct(exec, ProfilingReason::API, jsObject, constructType, constructData, argList)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + result = 0; + return result; +} + +struct OpaqueJSPropertyNameArray { + WTF_MAKE_FAST_ALLOCATED; +public: + OpaqueJSPropertyNameArray(VM* vm) + : refCount(0) + , vm(vm) + { + } + + unsigned refCount; + VM* vm; + Vector> array; +}; + +JSPropertyNameArrayRef JSObjectCopyPropertyNames(JSContextRef ctx, JSObjectRef object) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + VM* vm = &exec->vm(); + + JSObject* jsObject = toJS(object); + JSPropertyNameArrayRef propertyNames = new OpaqueJSPropertyNameArray(vm); + PropertyNameArray array(vm, PropertyNameMode::Strings); + jsObject->methodTable()->getPropertyNames(jsObject, exec, array, EnumerationMode()); + + size_t size = array.size(); + propertyNames->array.reserveInitialCapacity(size); + for (size_t i = 0; i < size; ++i) + propertyNames->array.uncheckedAppend(JSRetainPtr(Adopt, OpaqueJSString::create(array[i].string()).leakRef())); + + return JSPropertyNameArrayRetain(propertyNames); +} + +JSPropertyNameArrayRef JSPropertyNameArrayRetain(JSPropertyNameArrayRef array) +{ + ++array->refCount; + return array; +} + +void JSPropertyNameArrayRelease(JSPropertyNameArrayRef array) +{ + if (--array->refCount == 0) { + JSLockHolder locker(array->vm); + delete array; + } +} + +size_t JSPropertyNameArrayGetCount(JSPropertyNameArrayRef array) +{ + return array->array.size(); +} + +JSStringRef JSPropertyNameArrayGetNameAtIndex(JSPropertyNameArrayRef array, size_t index) +{ + return array->array[static_cast(index)].get(); +} + +void JSPropertyNameAccumulatorAddName(JSPropertyNameAccumulatorRef array, JSStringRef propertyName) +{ + PropertyNameArray* propertyNames = toJS(array); + JSLockHolder locker(propertyNames->vm()); + propertyNames->add(propertyName->identifier(propertyNames->vm())); +} diff --git a/API/JSObjectRef.h b/API/JSObjectRef.h new file mode 100644 index 0000000..95d53b7 --- /dev/null +++ b/API/JSObjectRef.h @@ -0,0 +1,694 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * Copyright (C) 2008 Kelvin W Sherlock (ksherlock@gmail.com) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSObjectRef_h +#define JSObjectRef_h + +#include +#include +#include + +#ifndef __cplusplus +#include +#endif +#include /* for size_t */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! +@enum JSPropertyAttribute +@constant kJSPropertyAttributeNone Specifies that a property has no special attributes. +@constant kJSPropertyAttributeReadOnly Specifies that a property is read-only. +@constant kJSPropertyAttributeDontEnum Specifies that a property should not be enumerated by JSPropertyEnumerators and JavaScript for...in loops. +@constant kJSPropertyAttributeDontDelete Specifies that the delete operation should fail on a property. +*/ +enum { + kJSPropertyAttributeNone = 0, + kJSPropertyAttributeReadOnly = 1 << 1, + kJSPropertyAttributeDontEnum = 1 << 2, + kJSPropertyAttributeDontDelete = 1 << 3 +}; + +/*! +@typedef JSPropertyAttributes +@abstract A set of JSPropertyAttributes. Combine multiple attributes by logically ORing them together. +*/ +typedef unsigned JSPropertyAttributes; + +/*! +@enum JSClassAttribute +@constant kJSClassAttributeNone Specifies that a class has no special attributes. +@constant kJSClassAttributeNoAutomaticPrototype Specifies that a class should not automatically generate a shared prototype for its instance objects. Use kJSClassAttributeNoAutomaticPrototype in combination with JSObjectSetPrototype to manage prototypes manually. +*/ +enum { + kJSClassAttributeNone = 0, + kJSClassAttributeNoAutomaticPrototype = 1 << 1 +}; + +/*! +@typedef JSClassAttributes +@abstract A set of JSClassAttributes. Combine multiple attributes by logically ORing them together. +*/ +typedef unsigned JSClassAttributes; + +/*! +@typedef JSObjectInitializeCallback +@abstract The callback invoked when an object is first created. +@param ctx The execution context to use. +@param object The JSObject being created. +@discussion If you named your function Initialize, you would declare it like this: + +void Initialize(JSContextRef ctx, JSObjectRef object); + +Unlike the other object callbacks, the initialize callback is called on the least +derived class (the parent class) first, and the most derived class last. +*/ +typedef void +(*JSObjectInitializeCallback) (JSContextRef ctx, JSObjectRef object); + +/*! +@typedef JSObjectFinalizeCallback +@abstract The callback invoked when an object is finalized (prepared for garbage collection). An object may be finalized on any thread. +@param object The JSObject being finalized. +@discussion If you named your function Finalize, you would declare it like this: + +void Finalize(JSObjectRef object); + +The finalize callback is called on the most derived class first, and the least +derived class (the parent class) last. + +You must not call any function that may cause a garbage collection or an allocation +of a garbage collected object from within a JSObjectFinalizeCallback. This includes +all functions that have a JSContextRef parameter. +*/ +typedef void +(*JSObjectFinalizeCallback) (JSObjectRef object); + +/*! 
+@typedef JSObjectHasPropertyCallback +@abstract The callback invoked when determining whether an object has a property. +@param ctx The execution context to use. +@param object The JSObject to search for the property. +@param propertyName A JSString containing the name of the property look up. +@result true if object has the property, otherwise false. +@discussion If you named your function HasProperty, you would declare it like this: + +bool HasProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName); + +If this function returns false, the hasProperty request forwards to object's statically declared properties, then its parent class chain (which includes the default object class), then its prototype chain. + +This callback enables optimization in cases where only a property's existence needs to be known, not its value, and computing its value would be expensive. + +If this callback is NULL, the getProperty callback will be used to service hasProperty requests. +*/ +typedef bool +(*JSObjectHasPropertyCallback) (JSContextRef ctx, JSObjectRef object, JSStringRef propertyName); + +/*! +@typedef JSObjectGetPropertyCallback +@abstract The callback invoked when getting a property's value. +@param ctx The execution context to use. +@param object The JSObject to search for the property. +@param propertyName A JSString containing the name of the property to get. +@param exception A pointer to a JSValueRef in which to return an exception, if any. +@result The property's value if object has the property, otherwise NULL. +@discussion If you named your function GetProperty, you would declare it like this: + +JSValueRef GetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception); + +If this function returns NULL, the get request forwards to object's statically declared properties, then its parent class chain (which includes the default object class), then its prototype chain. +*/ +typedef JSValueRef +(*JSObjectGetPropertyCallback) (JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception); + +/*! +@typedef JSObjectSetPropertyCallback +@abstract The callback invoked when setting a property's value. +@param ctx The execution context to use. +@param object The JSObject on which to set the property's value. +@param propertyName A JSString containing the name of the property to set. +@param value A JSValue to use as the property's value. +@param exception A pointer to a JSValueRef in which to return an exception, if any. +@result true if the property was set, otherwise false. +@discussion If you named your function SetProperty, you would declare it like this: + +bool SetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception); + +If this function returns false, the set request forwards to object's statically declared properties, then its parent class chain (which includes the default object class). +*/ +typedef bool +(*JSObjectSetPropertyCallback) (JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception); + +/*! +@typedef JSObjectDeletePropertyCallback +@abstract The callback invoked when deleting a property. +@param ctx The execution context to use. +@param object The JSObject in which to delete the property. +@param propertyName A JSString containing the name of the property to delete. +@param exception A pointer to a JSValueRef in which to return an exception, if any. 
+@result true if propertyName was successfully deleted, otherwise false. +@discussion If you named your function DeleteProperty, you would declare it like this: + +bool DeleteProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception); + +If this function returns false, the delete request forwards to object's statically declared properties, then its parent class chain (which includes the default object class). +*/ +typedef bool +(*JSObjectDeletePropertyCallback) (JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception); + +/*! +@typedef JSObjectGetPropertyNamesCallback +@abstract The callback invoked when collecting the names of an object's properties. +@param ctx The execution context to use. +@param object The JSObject whose property names are being collected. +@param propertyNames A JavaScript property name accumulator in which to accumulate the names of object's properties. +@discussion If you named your function GetPropertyNames, you would declare it like this: + +void GetPropertyNames(JSContextRef ctx, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames); + +Property name accumulators are used by JSObjectCopyPropertyNames and JavaScript for...in loops. + +Use JSPropertyNameAccumulatorAddName to add property names to accumulator. A class's getPropertyNames callback only needs to provide the names of properties that the class vends through a custom getProperty or setProperty callback. Other properties, including statically declared properties, properties vended by other classes, and properties belonging to object's prototype, are added independently. +*/ +typedef void +(*JSObjectGetPropertyNamesCallback) (JSContextRef ctx, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames); + +/*! +@typedef JSObjectCallAsFunctionCallback +@abstract The callback invoked when an object is called as a function. +@param ctx The execution context to use. +@param function A JSObject that is the function being called. +@param thisObject A JSObject that is the 'this' variable in the function's scope. +@param argumentCount An integer count of the number of arguments in arguments. +@param arguments A JSValue array of the arguments passed to the function. +@param exception A pointer to a JSValueRef in which to return an exception, if any. +@result A JSValue that is the function's return value. +@discussion If you named your function CallAsFunction, you would declare it like this: + +JSValueRef CallAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +If your callback were invoked by the JavaScript expression 'myObject.myFunction()', function would be set to myFunction, and thisObject would be set to myObject. + +If this callback is NULL, calling your object as a function will throw an exception. +*/ +typedef JSValueRef +(*JSObjectCallAsFunctionCallback) (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +/*! +@typedef JSObjectCallAsConstructorCallback +@abstract The callback invoked when an object is used as a constructor in a 'new' expression. +@param ctx The execution context to use. +@param constructor A JSObject that is the constructor being called. +@param argumentCount An integer count of the number of arguments in arguments. +@param arguments A JSValue array of the arguments passed to the function. 
+@param exception A pointer to a JSValueRef in which to return an exception, if any. +@result A JSObject that is the constructor's return value. +@discussion If you named your function CallAsConstructor, you would declare it like this: + +JSObjectRef CallAsConstructor(JSContextRef ctx, JSObjectRef constructor, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +If your callback were invoked by the JavaScript expression 'new myConstructor()', constructor would be set to myConstructor. + +If this callback is NULL, using your object as a constructor in a 'new' expression will throw an exception. +*/ +typedef JSObjectRef +(*JSObjectCallAsConstructorCallback) (JSContextRef ctx, JSObjectRef constructor, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +/*! +@typedef JSObjectHasInstanceCallback +@abstract The callback invoked when an object is used as the target of an 'instanceof' expression. +@param ctx The execution context to use. +@param constructor The JSObject that is the target of the 'instanceof' expression. +@param possibleInstance The JSValue being tested to determine if it is an instance of constructor. +@param exception A pointer to a JSValueRef in which to return an exception, if any. +@result true if possibleInstance is an instance of constructor, otherwise false. +@discussion If you named your function HasInstance, you would declare it like this: + +bool HasInstance(JSContextRef ctx, JSObjectRef constructor, JSValueRef possibleInstance, JSValueRef* exception); + +If your callback were invoked by the JavaScript expression 'someValue instanceof myObject', constructor would be set to myObject and possibleInstance would be set to someValue. + +If this callback is NULL, 'instanceof' expressions that target your object will return false. + +Standard JavaScript practice calls for objects that implement the callAsConstructor callback to implement the hasInstance callback as well. +*/ +typedef bool +(*JSObjectHasInstanceCallback) (JSContextRef ctx, JSObjectRef constructor, JSValueRef possibleInstance, JSValueRef* exception); + +/*! +@typedef JSObjectConvertToTypeCallback +@abstract The callback invoked when converting an object to a particular JavaScript type. +@param ctx The execution context to use. +@param object The JSObject to convert. +@param type A JSType specifying the JavaScript type to convert to. +@param exception A pointer to a JSValueRef in which to return an exception, if any. +@result The object's converted value, or NULL if the object was not converted. +@discussion If you named your function ConvertToType, you would declare it like this: + +JSValueRef ConvertToType(JSContextRef ctx, JSObjectRef object, JSType type, JSValueRef* exception); + +If this function returns NULL, the conversion request forwards to object's parent class chain (which includes the default object class). + +This function is only invoked when converting an object to number or string. An object converted to boolean is 'true.' An object converted to object is itself. +*/ +typedef JSValueRef +(*JSObjectConvertToTypeCallback) (JSContextRef ctx, JSObjectRef object, JSType type, JSValueRef* exception); + +/*! +@struct JSStaticValue +@abstract This structure describes a statically declared value property. +@field name A null-terminated UTF8 string containing the property's name. +@field getProperty A JSObjectGetPropertyCallback to invoke when getting the property's value.
+@field setProperty A JSObjectSetPropertyCallback to invoke when setting the property's value. May be NULL if the ReadOnly attribute is set. +@field attributes A logically ORed set of JSPropertyAttributes to give to the property. +*/ +typedef struct { + const char* name; + JSObjectGetPropertyCallback getProperty; + JSObjectSetPropertyCallback setProperty; + JSPropertyAttributes attributes; +} JSStaticValue; + +/*! +@struct JSStaticFunction +@abstract This structure describes a statically declared function property. +@field name A null-terminated UTF8 string containing the property's name. +@field callAsFunction A JSObjectCallAsFunctionCallback to invoke when the property is called as a function. +@field attributes A logically ORed set of JSPropertyAttributes to give to the property. +*/ +typedef struct { + const char* name; + JSObjectCallAsFunctionCallback callAsFunction; + JSPropertyAttributes attributes; +} JSStaticFunction; + +/*! +@struct JSClassDefinition +@abstract This structure contains properties and callbacks that define a type of object. All fields other than the version field are optional. Any pointer may be NULL. +@field version The version number of this structure. The current version is 0. +@field attributes A logically ORed set of JSClassAttributes to give to the class. +@field className A null-terminated UTF8 string containing the class's name. +@field parentClass A JSClass to set as the class's parent class. Pass NULL use the default object class. +@field staticValues A JSStaticValue array containing the class's statically declared value properties. Pass NULL to specify no statically declared value properties. The array must be terminated by a JSStaticValue whose name field is NULL. +@field staticFunctions A JSStaticFunction array containing the class's statically declared function properties. Pass NULL to specify no statically declared function properties. The array must be terminated by a JSStaticFunction whose name field is NULL. +@field initialize The callback invoked when an object is first created. Use this callback to initialize the object. +@field finalize The callback invoked when an object is finalized (prepared for garbage collection). Use this callback to release resources allocated for the object, and perform other cleanup. +@field hasProperty The callback invoked when determining whether an object has a property. If this field is NULL, getProperty is called instead. The hasProperty callback enables optimization in cases where only a property's existence needs to be known, not its value, and computing its value is expensive. +@field getProperty The callback invoked when getting a property's value. +@field setProperty The callback invoked when setting a property's value. +@field deleteProperty The callback invoked when deleting a property. +@field getPropertyNames The callback invoked when collecting the names of an object's properties. +@field callAsFunction The callback invoked when an object is called as a function. +@field hasInstance The callback invoked when an object is used as the target of an 'instanceof' expression. +@field callAsConstructor The callback invoked when an object is used as a constructor in a 'new' expression. +@field convertToType The callback invoked when converting an object to a particular JavaScript type. +@discussion The staticValues and staticFunctions arrays are the simplest and most efficient means for vending custom properties. 
Statically declared properties automatically service requests like getProperty, setProperty, and getPropertyNames. Property access callbacks are required only to implement unusual properties, like array indexes, whose names are not known at compile-time. + +If you named your getter function "GetX" and your setter function "SetX", you would declare a JSStaticValue array containing "X" like this: + +JSStaticValue StaticValueArray[] = { + { "X", GetX, SetX, kJSPropertyAttributeNone }, + { 0, 0, 0, 0 } +}; + +Standard JavaScript practice calls for storing function objects in prototypes, so they can be shared. The default JSClass created by JSClassCreate follows this idiom, instantiating objects with a shared, automatically generated prototype containing the class's function objects. The kJSClassAttributeNoAutomaticPrototype attribute specifies that a JSClass should not automatically generate such a prototype. The resulting JSClass instantiates objects with the default object prototype, and gives each instance object its own copy of the class's function objects. + +A NULL callback specifies that the default object callback should substitute, except in the case of hasProperty, where it specifies that getProperty should substitute. +*/ +typedef struct { + int version; /* current (and only) version is 0 */ + JSClassAttributes attributes; + + const char* className; + JSClassRef parentClass; + + const JSStaticValue* staticValues; + const JSStaticFunction* staticFunctions; + + JSObjectInitializeCallback initialize; + JSObjectFinalizeCallback finalize; + JSObjectHasPropertyCallback hasProperty; + JSObjectGetPropertyCallback getProperty; + JSObjectSetPropertyCallback setProperty; + JSObjectDeletePropertyCallback deleteProperty; + JSObjectGetPropertyNamesCallback getPropertyNames; + JSObjectCallAsFunctionCallback callAsFunction; + JSObjectCallAsConstructorCallback callAsConstructor; + JSObjectHasInstanceCallback hasInstance; + JSObjectConvertToTypeCallback convertToType; +} JSClassDefinition; + +/*! +@const kJSClassDefinitionEmpty +@abstract A JSClassDefinition structure of the current version, filled with NULL pointers and having no attributes. +@discussion Use this constant as a convenience when creating class definitions. For example, to create a class definition with only a finalize method: + +JSClassDefinition definition = kJSClassDefinitionEmpty; +definition.finalize = Finalize; +*/ +JS_EXPORT extern const JSClassDefinition kJSClassDefinitionEmpty; + +/*! +@function +@abstract Creates a JavaScript class suitable for use with JSObjectMake. +@param definition A JSClassDefinition that defines the class. +@result A JSClass with the given definition. Ownership follows the Create Rule. +*/ +JS_EXPORT JSClassRef JSClassCreate(const JSClassDefinition* definition); + +/*! +@function +@abstract Retains a JavaScript class. +@param jsClass The JSClass to retain. +@result A JSClass that is the same as jsClass. +*/ +JS_EXPORT JSClassRef JSClassRetain(JSClassRef jsClass); + +/*! +@function +@abstract Releases a JavaScript class. +@param jsClass The JSClass to release. +*/ +JS_EXPORT void JSClassRelease(JSClassRef jsClass); + +/*! +@function +@abstract Creates a JavaScript object. +@param ctx The execution context to use. +@param jsClass The JSClass to assign to the object. Pass NULL to use the default object class. +@param data A void* to set as the object's private data. Pass NULL to specify no private data. +@result A JSObject with the given class and private data.
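+
+For example, a minimal creation sketch (the "Widget" class name, the FinalizeWidget callback, and the strdup'd placeholder data are illustrative, not part of this API):
+
+static void FinalizeWidget(JSObjectRef object)
+{
+    free(JSObjectGetPrivate(object)); // release the native state stored as private data
+}
+
+static JSObjectRef MakeWidget(JSContextRef ctx)
+{
+    JSClassDefinition definition = kJSClassDefinitionEmpty;
+    definition.className = "Widget";
+    definition.finalize = FinalizeWidget;
+    JSClassRef widgetClass = JSClassCreate(&definition);
+    JSObjectRef widget = JSObjectMake(ctx, widgetClass, strdup("native state"));
+    JSClassRelease(widgetClass); // safe because the created object retains the class
+    return widget;
+}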
+@discussion The default object class does not allocate storage for private data, so you must provide a non-NULL jsClass to JSObjectMake if you want your object to be able to store private data. + +data is set on the created object before the initialize methods in its class chain are called. This enables the initialize methods to retrieve and manipulate data through JSObjectGetPrivate. +*/ +JS_EXPORT JSObjectRef JSObjectMake(JSContextRef ctx, JSClassRef jsClass, void* data); + +/*! +@function +@abstract Convenience method for creating a JavaScript function with a given callback as its implementation. +@param ctx The execution context to use. +@param name A JSString containing the function's name. This will be used when converting the function to string. Pass NULL to create an anonymous function. +@param callAsFunction The JSObjectCallAsFunctionCallback to invoke when the function is called. +@result A JSObject that is a function. The object's prototype will be the default function prototype. +*/ +JS_EXPORT JSObjectRef JSObjectMakeFunctionWithCallback(JSContextRef ctx, JSStringRef name, JSObjectCallAsFunctionCallback callAsFunction); + +/*! +@function +@abstract Convenience method for creating a JavaScript constructor. +@param ctx The execution context to use. +@param jsClass A JSClass that is the class your constructor will assign to the objects it constructs. jsClass will be used to set the constructor's .prototype property, and to evaluate 'instanceof' expressions. Pass NULL to use the default object class. +@param callAsConstructor A JSObjectCallAsConstructorCallback to invoke when your constructor is used in a 'new' expression. Pass NULL to use the default object constructor. +@result A JSObject that is a constructor. The object's prototype will be the default object prototype. +@discussion The default object constructor takes no arguments and constructs an object of class jsClass with no private data. +*/ +JS_EXPORT JSObjectRef JSObjectMakeConstructor(JSContextRef ctx, JSClassRef jsClass, JSObjectCallAsConstructorCallback callAsConstructor); + +/*! + @function + @abstract Creates a JavaScript Array object. + @param ctx The execution context to use. + @param argumentCount An integer count of the number of arguments in arguments. + @param arguments A JSValue array of data to populate the Array with. Pass NULL if argumentCount is 0. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObject that is an Array. + @discussion The behavior of this function does not exactly match the behavior of the built-in Array constructor. Specifically, if one argument + is supplied, this function returns an array with one element. + */ +JS_EXPORT JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) CF_AVAILABLE(10_6, 7_0); + +/*! + @function + @abstract Creates a JavaScript Date object, as if by invoking the built-in Date constructor. + @param ctx The execution context to use. + @param argumentCount An integer count of the number of arguments in arguments. + @param arguments A JSValue array of arguments to pass to the Date Constructor. Pass NULL if argumentCount is 0. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObject that is a Date.
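+
+ For example, a minimal sketch that builds a Date from a millisecond timestamp (the helper name and its parameters are illustrative):
+
+ static JSObjectRef MakeDateFromTimestamp(JSContextRef ctx, double millisecondsSinceEpoch, JSValueRef* exception)
+ {
+     JSValueRef argument = JSValueMakeNumber(ctx, millisecondsSinceEpoch); // a single numeric argument is treated as a timestamp
+     return JSObjectMakeDate(ctx, 1, &argument, exception);
+ }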
+ */ +JS_EXPORT JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) CF_AVAILABLE(10_6, 7_0); + +/*! + @function + @abstract Creates a JavaScript Error object, as if by invoking the built-in Error constructor. + @param ctx The execution context to use. + @param argumentCount An integer count of the number of arguments in arguments. + @param arguments A JSValue array of arguments to pass to the Error Constructor. Pass NULL if argumentCount is 0. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObject that is a Error. + */ +JS_EXPORT JSObjectRef JSObjectMakeError(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) CF_AVAILABLE(10_6, 7_0); + +/*! + @function + @abstract Creates a JavaScript RegExp object, as if by invoking the built-in RegExp constructor. + @param ctx The execution context to use. + @param argumentCount An integer count of the number of arguments in arguments. + @param arguments A JSValue array of arguments to pass to the RegExp Constructor. Pass NULL if argumentCount is 0. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObject that is a RegExp. + */ +JS_EXPORT JSObjectRef JSObjectMakeRegExp(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) CF_AVAILABLE(10_6, 7_0); + +/*! +@function +@abstract Creates a function with a given script as its body. +@param ctx The execution context to use. +@param name A JSString containing the function's name. This will be used when converting the function to string. Pass NULL to create an anonymous function. +@param parameterCount An integer count of the number of parameter names in parameterNames. +@param parameterNames A JSString array containing the names of the function's parameters. Pass NULL if parameterCount is 0. +@param body A JSString containing the script to use as the function's body. +@param sourceURL A JSString containing a URL for the script's source file. This is only used when reporting exceptions. Pass NULL if you do not care to include source file information in exceptions. +@param startingLineNumber An integer value specifying the script's starting line number in the file located at sourceURL. This is only used when reporting exceptions. The value is one-based, so the first line is line 1 and invalid values are clamped to 1. +@param exception A pointer to a JSValueRef in which to store a syntax error exception, if any. Pass NULL if you do not care to store a syntax error exception. +@result A JSObject that is a function, or NULL if either body or parameterNames contains a syntax error. The object's prototype will be the default function prototype. +@discussion Use this method when you want to execute a script repeatedly, to avoid the cost of re-parsing the script before each execution. +*/ +JS_EXPORT JSObjectRef JSObjectMakeFunction(JSContextRef ctx, JSStringRef name, unsigned parameterCount, const JSStringRef parameterNames[], JSStringRef body, JSStringRef sourceURL, int startingLineNumber, JSValueRef* exception); + +/*! +@function +@abstract Gets an object's prototype. +@param ctx The execution context to use. +@param object A JSObject whose prototype you want to get. +@result A JSValue that is the object's prototype. 
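+
+For example, a sketch that walks the prototype chain to test whether a given value appears in it (the helper name is illustrative; exceptions from JSValueToObject are ignored here for brevity):
+
+static bool PrototypeChainContains(JSContextRef ctx, JSObjectRef object, JSValueRef candidate)
+{
+    JSValueRef prototype = JSObjectGetPrototype(ctx, object);
+    while (JSValueIsObject(ctx, prototype)) {
+        if (JSValueIsStrictEqual(ctx, prototype, candidate))
+            return true;
+        prototype = JSObjectGetPrototype(ctx, JSValueToObject(ctx, prototype, NULL)); // move one link up the chain
+    }
+    return false;
+}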
+*/ +JS_EXPORT JSValueRef JSObjectGetPrototype(JSContextRef ctx, JSObjectRef object); + +/*! +@function +@abstract Sets an object's prototype. +@param ctx The execution context to use. +@param object The JSObject whose prototype you want to set. +@param value A JSValue to set as the object's prototype. +*/ +JS_EXPORT void JSObjectSetPrototype(JSContextRef ctx, JSObjectRef object, JSValueRef value); + +/*! +@function +@abstract Tests whether an object has a given property. +@param object The JSObject to test. +@param propertyName A JSString containing the property's name. +@result true if the object has a property whose name matches propertyName, otherwise false. +*/ +JS_EXPORT bool JSObjectHasProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName); + +/*! +@function +@abstract Gets a property from an object. +@param ctx The execution context to use. +@param object The JSObject whose property you want to get. +@param propertyName A JSString containing the property's name. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result The property's value if object has the property, otherwise the undefined value. +*/ +JS_EXPORT JSValueRef JSObjectGetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception); + +/*! +@function +@abstract Sets a property on an object. +@param ctx The execution context to use. +@param object The JSObject whose property you want to set. +@param propertyName A JSString containing the property's name. +@param value A JSValue to use as the property's value. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@param attributes A logically ORed set of JSPropertyAttributes to give to the property. +*/ +JS_EXPORT void JSObjectSetProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSPropertyAttributes attributes, JSValueRef* exception); + +/*! +@function +@abstract Deletes a property from an object. +@param ctx The execution context to use. +@param object The JSObject whose property you want to delete. +@param propertyName A JSString containing the property's name. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result true if the delete operation succeeds, otherwise false (for example, if the property has the kJSPropertyAttributeDontDelete attribute set). +*/ +JS_EXPORT bool JSObjectDeleteProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception); + +/*! +@function +@abstract Gets a property from an object by numeric index. +@param ctx The execution context to use. +@param object The JSObject whose property you want to get. +@param propertyIndex An integer value that is the property's name. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result The property's value if object has the property, otherwise the undefined value. +@discussion Calling JSObjectGetPropertyAtIndex is equivalent to calling JSObjectGetProperty with a string containing propertyIndex, but JSObjectGetPropertyAtIndex provides optimized access to numeric properties. +*/ +JS_EXPORT JSValueRef JSObjectGetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsigned propertyIndex, JSValueRef* exception); + +/*! 
+@function +@abstract Sets a property on an object by numeric index. +@param ctx The execution context to use. +@param object The JSObject whose property you want to set. +@param propertyIndex The property's name as a number. +@param value A JSValue to use as the property's value. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@discussion Calling JSObjectSetPropertyAtIndex is equivalent to calling JSObjectSetProperty with a string containing propertyIndex, but JSObjectSetPropertyAtIndex provides optimized access to numeric properties. +*/ +JS_EXPORT void JSObjectSetPropertyAtIndex(JSContextRef ctx, JSObjectRef object, unsigned propertyIndex, JSValueRef value, JSValueRef* exception); + +/*! +@function +@abstract Gets an object's private data. +@param object A JSObject whose private data you want to get. +@result A void* that is the object's private data, if the object has private data, otherwise NULL. +*/ +JS_EXPORT void* JSObjectGetPrivate(JSObjectRef object); + +/*! +@function +@abstract Sets a pointer to private data on an object. +@param object The JSObject whose private data you want to set. +@param data A void* to set as the object's private data. +@result true if object can store private data, otherwise false. +@discussion The default object class does not allocate storage for private data. Only objects created with a non-NULL JSClass can store private data. +*/ +JS_EXPORT bool JSObjectSetPrivate(JSObjectRef object, void* data); + +/*! +@function +@abstract Tests whether an object can be called as a function. +@param ctx The execution context to use. +@param object The JSObject to test. +@result true if the object can be called as a function, otherwise false. +*/ +JS_EXPORT bool JSObjectIsFunction(JSContextRef ctx, JSObjectRef object); + +/*! +@function +@abstract Calls an object as a function. +@param ctx The execution context to use. +@param object The JSObject to call as a function. +@param thisObject The object to use as "this," or NULL to use the global object as "this." +@param argumentCount An integer count of the number of arguments in arguments. +@param arguments A JSValue array of arguments to pass to the function. Pass NULL if argumentCount is 0. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result The JSValue that results from calling object as a function, or NULL if an exception is thrown or object is not a function. +*/ +JS_EXPORT JSValueRef JSObjectCallAsFunction(JSContextRef ctx, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +/*! +@function +@abstract Tests whether an object can be called as a constructor. +@param ctx The execution context to use. +@param object The JSObject to test. +@result true if the object can be called as a constructor, otherwise false. +*/ +JS_EXPORT bool JSObjectIsConstructor(JSContextRef ctx, JSObjectRef object); + +/*! +@function +@abstract Calls an object as a constructor. +@param ctx The execution context to use. +@param object The JSObject to call as a constructor. +@param argumentCount An integer count of the number of arguments in arguments. +@param arguments A JSValue array of arguments to pass to the constructor. Pass NULL if argumentCount is 0. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. 
+@result The JSObject that results from calling object as a constructor, or NULL if an exception is thrown or object is not a constructor. +*/ +JS_EXPORT JSObjectRef JSObjectCallAsConstructor(JSContextRef ctx, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +/*! +@function +@abstract Gets the names of an object's enumerable properties. +@param ctx The execution context to use. +@param object The object whose property names you want to get. +@result A JSPropertyNameArray containing the names of object's enumerable properties. Ownership follows the Create Rule. +*/ +JS_EXPORT JSPropertyNameArrayRef JSObjectCopyPropertyNames(JSContextRef ctx, JSObjectRef object); + +/*! +@function +@abstract Retains a JavaScript property name array. +@param array The JSPropertyNameArray to retain. +@result A JSPropertyNameArray that is the same as array. +*/ +JS_EXPORT JSPropertyNameArrayRef JSPropertyNameArrayRetain(JSPropertyNameArrayRef array); + +/*! +@function +@abstract Releases a JavaScript property name array. +@param array The JSPropertyNameArray to release. +*/ +JS_EXPORT void JSPropertyNameArrayRelease(JSPropertyNameArrayRef array); + +/*! +@function +@abstract Gets a count of the number of items in a JavaScript property name array. +@param array The array from which to retrieve the count. +@result An integer count of the number of names in array. +*/ +JS_EXPORT size_t JSPropertyNameArrayGetCount(JSPropertyNameArrayRef array); + +/*! +@function +@abstract Gets a property name at a given index in a JavaScript property name array. +@param array The array from which to retrieve the property name. +@param index The index of the property name to retrieve. +@result A JSStringRef containing the property name. +*/ +JS_EXPORT JSStringRef JSPropertyNameArrayGetNameAtIndex(JSPropertyNameArrayRef array, size_t index); + +/*! +@function +@abstract Adds a property name to a JavaScript property name accumulator. +@param accumulator The accumulator object to which to add the property name. +@param propertyName The property name to add. +*/ +JS_EXPORT void JSPropertyNameAccumulatorAddName(JSPropertyNameAccumulatorRef accumulator, JSStringRef propertyName); + +#ifdef __cplusplus +} +#endif + +#endif /* JSObjectRef_h */ diff --git a/API/JSObjectRefPrivate.h b/API/JSObjectRefPrivate.h new file mode 100644 index 0000000..32e80ab --- /dev/null +++ b/API/JSObjectRefPrivate.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSObjectRefPrivate_h +#define JSObjectRefPrivate_h + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + @function + @abstract Sets a private property on an object. This private property cannot be accessed from within JavaScript. + @param ctx The execution context to use. + @param object The JSObject whose private property you want to set. + @param propertyName A JSString containing the property's name. + @param value A JSValue to use as the property's value. This may be NULL. + @result true if object can store private data, otherwise false. + @discussion This API allows you to store JS values directly on an object in a way that ensures they are kept alive without exposing them to JavaScript code and without introducing the reference cycles that may occur when using JSValueProtect. + + The default object class does not allocate storage for private data. Only objects created with a non-NULL JSClass can store private properties. + */ +JS_EXPORT bool JSObjectSetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value); + +/*! + @function + @abstract Gets a private property from an object. + @param ctx The execution context to use. + @param object The JSObject whose private property you want to get. + @param propertyName A JSString containing the property's name. + @result The property's value if object has the property, otherwise NULL. + */ +JS_EXPORT JSValueRef JSObjectGetPrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName); + +/*! + @function + @abstract Deletes a private property from an object. + @param ctx The execution context to use. + @param object The JSObject whose private property you want to delete. + @param propertyName A JSString containing the property's name. + @result true if object can store private data, otherwise false. + @discussion The default object class does not allocate storage for private data. Only objects created with a non-NULL JSClass can store private data. + */ +JS_EXPORT bool JSObjectDeletePrivateProperty(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName); + +#ifdef __cplusplus +} +#endif + +#endif // JSObjectRefPrivate_h diff --git a/API/JSRemoteInspector.cpp b/API/JSRemoteInspector.cpp new file mode 100644 index 0000000..faebc5d --- /dev/null +++ b/API/JSRemoteInspector.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC.
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSRemoteInspector.h" + +#include "JSGlobalObjectConsoleClient.h" + +#if ENABLE(REMOTE_INSPECTOR) +#include "RemoteInspector.h" +#endif + +using namespace Inspector; + +static bool remoteInspectionEnabledByDefault = true; + +void JSRemoteInspectorDisableAutoStart(void) +{ +#if ENABLE(REMOTE_INSPECTOR) + RemoteInspector::startDisabled(); +#endif +} + +void JSRemoteInspectorStart(void) +{ +#if ENABLE(REMOTE_INSPECTOR) + RemoteInspector::singleton(); +#endif +} + +void JSRemoteInspectorSetParentProcessInformation(pid_t pid, const UInt8* auditData, size_t auditLength) +{ +#if ENABLE(REMOTE_INSPECTOR) + RetainPtr auditDataRef = adoptCF(CFDataCreate(kCFAllocatorDefault, auditData, auditLength)); + RemoteInspector::singleton().setParentProcessInformation(pid, auditDataRef); +#else + UNUSED_PARAM(pid); + UNUSED_PARAM(auditData); + UNUSED_PARAM(auditLength); +#endif +} + +void JSRemoteInspectorSetLogToSystemConsole(bool logToSystemConsole) +{ + JSGlobalObjectConsoleClient::setLogToSystemConsole(logToSystemConsole); +} + +bool JSRemoteInspectorGetInspectionEnabledByDefault(void) +{ + return remoteInspectionEnabledByDefault; +} + +void JSRemoteInspectorSetInspectionEnabledByDefault(bool enabledByDefault) +{ + remoteInspectionEnabledByDefault = enabledByDefault; +} diff --git a/API/JSRemoteInspector.h b/API/JSRemoteInspector.h new file mode 100644 index 0000000..2bde479 --- /dev/null +++ b/API/JSRemoteInspector.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JSRemoteInspector_h +#define JSRemoteInspector_h + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +JS_EXPORT void JSRemoteInspectorDisableAutoStart(void) CF_AVAILABLE(10_11, 9_0); +JS_EXPORT void JSRemoteInspectorStart(void) CF_AVAILABLE(10_11, 9_0); +JS_EXPORT void JSRemoteInspectorSetParentProcessInformation(pid_t, const uint8_t* auditData, size_t auditLength) CF_AVAILABLE(10_11, 9_0); + +JS_EXPORT void JSRemoteInspectorSetLogToSystemConsole(bool) CF_AVAILABLE(10_11, 9_0); + +JS_EXPORT bool JSRemoteInspectorGetInspectionEnabledByDefault(void) CF_AVAILABLE(10_11, 9_0); +JS_EXPORT void JSRemoteInspectorSetInspectionEnabledByDefault(bool) CF_AVAILABLE(10_11, 9_0); + +#ifdef __cplusplus +} +#endif + +#endif /* JSRemoteInspector_h */ diff --git a/API/JSRetainPtr.h b/API/JSRetainPtr.h new file mode 100644 index 0000000..e400840 --- /dev/null +++ b/API/JSRetainPtr.h @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
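The JSRemoteInspector entry points declared above are plain process-wide toggles. A hedged sketch of how an embedder might call them follows; the include path is an assumption, since JSRemoteInspector.h is a private header.

// Sketch: delay inspector auto-start and mirror inspector logging to the
// system console, then start the listener explicitly.
#include "JSRemoteInspector.h" // private header; include path is an assumption

static void configureRemoteInspection()
{
    JSRemoteInspectorDisableAutoStart();          // defer starting the listener
    JSRemoteInspectorSetLogToSystemConsole(true); // route inspector messages to the system console

    if (JSRemoteInspectorGetInspectionEnabledByDefault())
        JSRemoteInspectorStart();                 // start it once the host is ready
}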
+ */ + +#ifndef JSRetainPtr_h +#define JSRetainPtr_h + +#include +#include +#include + +inline void JSRetain(JSStringRef string) { JSStringRetain(string); } +inline void JSRelease(JSStringRef string) { JSStringRelease(string); } +inline void JSRetain(JSGlobalContextRef context) { JSGlobalContextRetain(context); } +inline void JSRelease(JSGlobalContextRef context) { JSGlobalContextRelease(context); } + +enum AdoptTag { Adopt }; + +template class JSRetainPtr { +public: + JSRetainPtr() : m_ptr(0) { } + JSRetainPtr(T ptr) : m_ptr(ptr) { if (ptr) JSRetain(ptr); } + JSRetainPtr(AdoptTag, T ptr) : m_ptr(ptr) { } + JSRetainPtr(const JSRetainPtr&); + template JSRetainPtr(const JSRetainPtr&); + ~JSRetainPtr(); + + T get() const { return m_ptr; } + + void clear(); + T leakRef(); + + T operator->() const { return m_ptr; } + + bool operator!() const { return !m_ptr; } + explicit operator bool() const { return m_ptr; } + + JSRetainPtr& operator=(const JSRetainPtr&); + template JSRetainPtr& operator=(const JSRetainPtr&); + JSRetainPtr& operator=(T); + template JSRetainPtr& operator=(U*); + + void adopt(T); + + void swap(JSRetainPtr&); + +private: + T m_ptr; +}; + +inline JSRetainPtr adopt(JSStringRef o) +{ + return JSRetainPtr(Adopt, o); +} + +inline JSRetainPtr adopt(JSGlobalContextRef o) +{ + return JSRetainPtr(Adopt, o); +} + +template inline JSRetainPtr::JSRetainPtr(const JSRetainPtr& o) + : m_ptr(o.m_ptr) +{ + if (m_ptr) + JSRetain(m_ptr); +} + +template template inline JSRetainPtr::JSRetainPtr(const JSRetainPtr& o) + : m_ptr(o.get()) +{ + if (m_ptr) + JSRetain(m_ptr); +} + +template inline JSRetainPtr::~JSRetainPtr() +{ + if (m_ptr) + JSRelease(m_ptr); +} + +template inline void JSRetainPtr::clear() +{ + if (T ptr = m_ptr) { + m_ptr = 0; + JSRelease(ptr); + } +} + +template inline T JSRetainPtr::leakRef() +{ + T ptr = m_ptr; + m_ptr = 0; + return ptr; +} + +template inline JSRetainPtr& JSRetainPtr::operator=(const JSRetainPtr& o) +{ + T optr = o.get(); + if (optr) + JSRetain(optr); + T ptr = m_ptr; + m_ptr = optr; + if (ptr) + JSRelease(ptr); + return *this; +} + +template template inline JSRetainPtr& JSRetainPtr::operator=(const JSRetainPtr& o) +{ + T optr = o.get(); + if (optr) + JSRetain(optr); + T ptr = m_ptr; + m_ptr = optr; + if (ptr) + JSRelease(ptr); + return *this; +} + +template inline JSRetainPtr& JSRetainPtr::operator=(T optr) +{ + if (optr) + JSRetain(optr); + T ptr = m_ptr; + m_ptr = optr; + if (ptr) + JSRelease(ptr); + return *this; +} + +template inline void JSRetainPtr::adopt(T optr) +{ + T ptr = m_ptr; + m_ptr = optr; + if (ptr) + JSRelease(ptr); +} + +template template inline JSRetainPtr& JSRetainPtr::operator=(U* optr) +{ + if (optr) + JSRetain(optr); + T ptr = m_ptr; + m_ptr = optr; + if (ptr) + JSRelease(ptr); + return *this; +} + +template inline void JSRetainPtr::swap(JSRetainPtr& o) +{ + std::swap(m_ptr, o.m_ptr); +} + +template inline void swap(JSRetainPtr& a, JSRetainPtr& b) +{ + a.swap(b); +} + +template inline bool operator==(const JSRetainPtr& a, const JSRetainPtr& b) +{ + return a.get() == b.get(); +} + +template inline bool operator==(const JSRetainPtr& a, U* b) +{ + return a.get() == b; +} + +template inline bool operator==(T* a, const JSRetainPtr& b) +{ + return a == b.get(); +} + +template inline bool operator!=(const JSRetainPtr& a, const JSRetainPtr& b) +{ + return a.get() != b.get(); +} + +template inline bool operator!=(const JSRetainPtr& a, U* b) +{ + return a.get() != b; +} + +template inline bool operator!=(T* a, const JSRetainPtr& b) +{ + return a != 
b.get(); +} + + +#endif // JSRetainPtr_h diff --git a/API/JSScriptRef.cpp b/API/JSScriptRef.cpp new file mode 100644 index 0000000..306e046 --- /dev/null +++ b/API/JSScriptRef.cpp @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "APICast.h" +#include "Completion.h" +#include "Exception.h" +#include "JSBasePrivate.h" +#include "VM.h" +#include "JSScriptRefPrivate.h" +#include "OpaqueJSString.h" +#include "JSCInlines.h" +#include "Parser.h" +#include "SourceCode.h" +#include "SourceProvider.h" + +using namespace JSC; + +struct OpaqueJSScript : public SourceProvider { +public: + static WTF::RefPtr create(VM* vm, const String& url, int startingLineNumber, const String& source) + { + return WTF::adoptRef(*new OpaqueJSScript(vm, url, startingLineNumber, source)); + } + + unsigned hash() const override + { + return m_source.get().hash(); + } + + StringView source() const override + { + return m_source.get(); + } + + VM* vm() const { return m_vm; } + +private: + OpaqueJSScript(VM* vm, const String& url, int startingLineNumber, const String& source) + : SourceProvider(url, TextPosition(OrdinalNumber::fromOneBasedInt(startingLineNumber), OrdinalNumber()), SourceProviderSourceType::Program) + , m_vm(vm) + , m_source(source.isNull() ? *StringImpl::empty() : *source.impl()) + { + } + + virtual ~OpaqueJSScript() { } + + VM* m_vm; + Ref m_source; +}; + +static bool parseScript(VM* vm, const SourceCode& source, ParserError& error) +{ + return !!JSC::parse( + vm, source, Identifier(), JSParserBuiltinMode::NotBuiltin, + JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, SuperBinding::NotNeeded, + error); +} + +extern "C" { + +JSScriptRef JSScriptCreateReferencingImmortalASCIIText(JSContextGroupRef contextGroup, JSStringRef url, int startingLineNumber, const char* source, size_t length, JSStringRef* errorMessage, int* errorLine) +{ + VM* vm = toJS(contextGroup); + JSLockHolder locker(vm); + for (size_t i = 0; i < length; i++) { + if (!isASCII(source[i])) + return 0; + } + + startingLineNumber = std::max(1, startingLineNumber); + + auto result = OpaqueJSScript::create(vm, url ? 
url->string() : String(), startingLineNumber, String(StringImpl::createFromLiteral(source, length))); + + ParserError error; + if (!parseScript(vm, SourceCode(result), error)) { + if (errorMessage) + *errorMessage = OpaqueJSString::create(error.message()).leakRef(); + if (errorLine) + *errorLine = error.line(); + return nullptr; + } + + return result.leakRef(); +} + +JSScriptRef JSScriptCreateFromString(JSContextGroupRef contextGroup, JSStringRef url, int startingLineNumber, JSStringRef source, JSStringRef* errorMessage, int* errorLine) +{ + VM* vm = toJS(contextGroup); + JSLockHolder locker(vm); + + startingLineNumber = std::max(1, startingLineNumber); + + auto result = OpaqueJSScript::create(vm, url ? url->string() : String(), startingLineNumber, source->string()); + + ParserError error; + if (!parseScript(vm, SourceCode(result), error)) { + if (errorMessage) + *errorMessage = OpaqueJSString::create(error.message()).leakRef(); + if (errorLine) + *errorLine = error.line(); + return nullptr; + } + + return result.leakRef(); +} + +void JSScriptRetain(JSScriptRef script) +{ + JSLockHolder locker(script->vm()); + script->ref(); +} + +void JSScriptRelease(JSScriptRef script) +{ + JSLockHolder locker(script->vm()); + script->deref(); +} + +JSValueRef JSScriptEvaluate(JSContextRef context, JSScriptRef script, JSValueRef thisValueRef, JSValueRef* exception) +{ + ExecState* exec = toJS(context); + JSLockHolder locker(exec); + if (script->vm() != &exec->vm()) { + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + NakedPtr internalException; + JSValue thisValue = thisValueRef ? toJS(exec, thisValueRef) : jsUndefined(); + JSValue result = evaluate(exec, SourceCode(script), thisValue, internalException); + if (internalException) { + if (exception) + *exception = toRef(exec, internalException->value()); + return 0; + } + ASSERT(result); + return toRef(exec, result); +} + +} diff --git a/API/JSScriptRefPrivate.h b/API/JSScriptRefPrivate.h new file mode 100644 index 0000000..e6224ce --- /dev/null +++ b/API/JSScriptRefPrivate.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSScriptRefPrivate_h +#define JSScriptRefPrivate_h + +#include +#include +#include + +/*! 
@typedef JSScriptRef A JavaScript script reference. */ +typedef struct OpaqueJSScript* JSScriptRef; + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + @function + @abstract Creates a script reference from an ASCII string, without copying or taking ownership of the string. + @param contextGroup The context group the script is to be used in. + @param url The source url to be reported in errors and exceptions. + @param startingLineNumber An integer value specifying the script's starting line number in the file located at sourceURL. This is only used when reporting exceptions. The value is one-based, so the first line is line 1 and invalid values are clamped to 1. + @param source The source string. This is required to be pure ASCII and to never be deallocated. + @param length The length of the source string. + @param errorMessage A pointer to a JSStringRef in which to store the parse error message if the source is not valid. Pass NULL if you do not care to store an error message. + @param errorLine A pointer to an int in which to store the line number of a parser error. Pass NULL if you do not care to store an error line. + @result A JSScriptRef for the provided source, or NULL if any non-ASCII character is found in source or if the source is not a valid JavaScript program. Ownership follows the Create Rule. + @discussion Use this function to create a reusable script reference with a constant + buffer as the backing string. The source string must outlive the global context. + */ +JS_EXPORT JSScriptRef JSScriptCreateReferencingImmortalASCIIText(JSContextGroupRef contextGroup, JSStringRef url, int startingLineNumber, const char* source, size_t length, JSStringRef* errorMessage, int* errorLine); + +/*! + @function + @abstract Creates a script reference from a string. + @param contextGroup The context group the script is to be used in. + @param url The source url to be reported in errors and exceptions. + @param startingLineNumber An integer value specifying the script's starting line number in the file located at sourceURL. This is only used when reporting exceptions. The value is one-based, so the first line is line 1 and invalid values are clamped to 1. + @param source The source string. + @param errorMessage A pointer to a JSStringRef in which to store the parse error message if the source is not valid. Pass NULL if you do not care to store an error message. + @param errorLine A pointer to an int in which to store the line number of a parser error. Pass NULL if you do not care to store an error line. + @result A JSScriptRef for the provided source, or NULL if the source is not a valid JavaScript program. Ownership follows the Create Rule. + */ +JS_EXPORT JSScriptRef JSScriptCreateFromString(JSContextGroupRef contextGroup, JSStringRef url, int startingLineNumber, JSStringRef source, JSStringRef* errorMessage, int* errorLine); + +/*! + @function + @abstract Retains a JavaScript script. + @param script The script to retain. + */ +JS_EXPORT void JSScriptRetain(JSScriptRef script); + +/*! + @function + @abstract Releases a JavaScript script. + @param script The script to release. + */ +JS_EXPORT void JSScriptRelease(JSScriptRef script); + +/*! + @function + @abstract Evaluates a JavaScript script. + @param ctx The execution context to use. + @param script The JSScript to evaluate. + @param thisValue The value to use as "this" when evaluating the script. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception.
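A short usage sketch of the JSScriptRef lifecycle documented here: parse once with JSScriptCreateFromString, evaluate repeatedly in a context created in the same context group, then release. This is illustrative only; the include paths and the trivial "6 * 7" source are assumptions.

// Sketch: reuse a parsed script across evaluations.
#include <JavaScriptCore/JavaScriptCore.h>
#include "JSScriptRefPrivate.h" // private header; include path is an assumption

static double runScriptTwice()
{
    JSContextGroupRef group = JSContextGroupCreate();
    JSGlobalContextRef ctx = JSGlobalContextCreateInGroup(group, nullptr);

    JSStringRef source = JSStringCreateWithUTF8CString("6 * 7");
    JSStringRef errorMessage = nullptr;
    int errorLine = 0;
    JSScriptRef script = JSScriptCreateFromString(group, nullptr, 1, source, &errorMessage, &errorLine);
    JSStringRelease(source);
    if (!script) {
        if (errorMessage)
            JSStringRelease(errorMessage); // parse failed; errorLine holds the offending line
        JSGlobalContextRelease(ctx);
        JSContextGroupRelease(group);
        return 0;
    }

    JSValueRef exception = nullptr;
    JSValueRef result = JSScriptEvaluate(ctx, script, nullptr, &exception); // first run
    result = JSScriptEvaluate(ctx, script, nullptr, &exception);            // reuse the parsed script
    double value = exception ? 0 : JSValueToNumber(ctx, result, nullptr);

    JSScriptRelease(script);
    JSGlobalContextRelease(ctx);
    JSContextGroupRelease(group);
    return value; // 42 if nothing threw
}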
+ @result The JSValue that results from evaluating script, or NULL if an exception is thrown. + */ +JS_EXPORT JSValueRef JSScriptEvaluate(JSContextRef ctx, JSScriptRef script, JSValueRef thisValue, JSValueRef* exception); + + +#ifdef __cplusplus +} +#endif + +#endif /* JSScriptRefPrivate_h */ diff --git a/API/JSStringRef.cpp b/API/JSStringRef.cpp new file mode 100644 index 0000000..c9b380c --- /dev/null +++ b/API/JSStringRef.cpp @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSStringRef.h" +#include "JSStringRefPrivate.h" + +#include "InitializeThreading.h" +#include "OpaqueJSString.h" +#include + +using namespace JSC; +using namespace WTF::Unicode; + +JSStringRef JSStringCreateWithCharacters(const JSChar* chars, size_t numChars) +{ + initializeThreading(); + return &OpaqueJSString::create(chars, numChars).leakRef(); +} + +JSStringRef JSStringCreateWithUTF8CString(const char* string) +{ + initializeThreading(); + if (string) { + size_t length = strlen(string); + Vector buffer(length); + UChar* p = buffer.data(); + bool sourceIsAllASCII; + const LChar* stringStart = reinterpret_cast(string); + if (conversionOK == convertUTF8ToUTF16(&string, string + length, &p, p + length, &sourceIsAllASCII)) { + if (sourceIsAllASCII) + return &OpaqueJSString::create(stringStart, length).leakRef(); + return &OpaqueJSString::create(buffer.data(), p - buffer.data()).leakRef(); + } + } + + return &OpaqueJSString::create().leakRef(); +} + +JSStringRef JSStringCreateWithCharactersNoCopy(const JSChar* chars, size_t numChars) +{ + initializeThreading(); + return OpaqueJSString::create(StringImpl::createWithoutCopying(chars, numChars)).leakRef(); +} + +JSStringRef JSStringRetain(JSStringRef string) +{ + string->ref(); + return string; +} + +void JSStringRelease(JSStringRef string) +{ + string->deref(); +} + +size_t JSStringGetLength(JSStringRef string) +{ + if (!string) + return 0; + return string->length(); +} + +const JSChar* JSStringGetCharactersPtr(JSStringRef string) +{ + if (!string) + return nullptr; + return string->characters(); +} + +size_t JSStringGetMaximumUTF8CStringSize(JSStringRef string) +{ + // Any UTF8 character > 3 bytes encodes as a UTF16 surrogate pair. 
+ return string->length() * 3 + 1; // + 1 for terminating '\0' +} + +size_t JSStringGetUTF8CString(JSStringRef string, char* buffer, size_t bufferSize) +{ + if (!string || !buffer || !bufferSize) + return 0; + + char* destination = buffer; + ConversionResult result; + if (string->is8Bit()) { + const LChar* source = string->characters8(); + result = convertLatin1ToUTF8(&source, source + string->length(), &destination, destination + bufferSize - 1); + } else { + const UChar* source = string->characters16(); + result = convertUTF16ToUTF8(&source, source + string->length(), &destination, destination + bufferSize - 1, true); + } + + *destination++ = '\0'; + if (result != conversionOK && result != targetExhausted) + return 0; + + return destination - buffer; +} + +bool JSStringIsEqual(JSStringRef a, JSStringRef b) +{ + return OpaqueJSString::equal(a, b); +} + +bool JSStringIsEqualToUTF8CString(JSStringRef a, const char* b) +{ + JSStringRef bBuf = JSStringCreateWithUTF8CString(b); + bool result = JSStringIsEqual(a, bBuf); + JSStringRelease(bBuf); + + return result; +} diff --git a/API/JSStringRef.h b/API/JSStringRef.h new file mode 100644 index 0000000..bc03ed7 --- /dev/null +++ b/API/JSStringRef.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSStringRef_h +#define JSStringRef_h + +#include + +#ifndef __cplusplus +#include +#endif +#include /* for size_t */ + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(_NATIVE_WCHAR_T_DEFINED) /* MSVC */ \ + && (!defined(__WCHAR_MAX__) || (__WCHAR_MAX__ > 0xffffU)) /* ISO C/C++ */ \ + && (!defined(WCHAR_MAX) || (WCHAR_MAX > 0xffffU)) /* RVCT */ +/*! +@typedef JSChar +@abstract A UTF-16 code unit. One, or a sequence of two, can encode any Unicode + character. As with all scalar types, endianness depends on the underlying + architecture. +*/ + typedef unsigned short JSChar; +#else + typedef wchar_t JSChar; +#endif + +/*! +@function +@abstract Creates a JavaScript string from a buffer of Unicode characters. +@param chars The buffer of Unicode characters to copy into the new JSString. +@param numChars The number of characters to copy from the buffer pointed to by chars. +@result A JSString containing chars. Ownership follows the Create Rule. 
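Because the Create Rule makes the caller responsible for releasing the returned string, the JSRetainPtr template introduced earlier in this patch can adopt such references so that JSStringRelease runs automatically. A minimal sketch, assuming the public JSRetainPtr.h header:

// Sketch: let JSRetainPtr own a string returned under the Create Rule.
#include <JavaScriptCore/JavaScriptCore.h>
#include <JavaScriptCore/JSRetainPtr.h>

static size_t lengthOfGreeting()
{
    // adopt() takes ownership without an extra JSStringRetain.
    JSRetainPtr<JSStringRef> greeting = adopt(JSStringCreateWithUTF8CString("hello"));
    return JSStringGetLength(greeting.get());
} // ~JSRetainPtr calls JSStringRelease here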
+*/ +JS_EXPORT JSStringRef JSStringCreateWithCharacters(const JSChar* chars, size_t numChars); +/*! +@function +@abstract Creates a JavaScript string from a null-terminated UTF8 string. +@param string The null-terminated UTF8 string to copy into the new JSString. +@result A JSString containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT JSStringRef JSStringCreateWithUTF8CString(const char* string); + +/*! +@function +@abstract Retains a JavaScript string. +@param string The JSString to retain. +@result A JSString that is the same as string. +*/ +JS_EXPORT JSStringRef JSStringRetain(JSStringRef string); +/*! +@function +@abstract Releases a JavaScript string. +@param string The JSString to release. +*/ +JS_EXPORT void JSStringRelease(JSStringRef string); + +/*! +@function +@abstract Returns the number of Unicode characters in a JavaScript string. +@param string The JSString whose length (in Unicode characters) you want to know. +@result The number of Unicode characters stored in string. +*/ +JS_EXPORT size_t JSStringGetLength(JSStringRef string); +/*! +@function +@abstract Returns a pointer to the Unicode character buffer that + serves as the backing store for a JavaScript string. +@param string The JSString whose backing store you want to access. +@result A pointer to the Unicode character buffer that serves as string's + backing store, which will be deallocated when string is deallocated. +*/ +JS_EXPORT const JSChar* JSStringGetCharactersPtr(JSStringRef string); + +/*! +@function +@abstract Returns the maximum number of bytes a JavaScript string will + take up if converted into a null-terminated UTF8 string. +@param string The JSString whose maximum converted size (in bytes) you + want to know. +@result The maximum number of bytes that could be required to convert string into a + null-terminated UTF8 string. The number of bytes that the conversion actually ends + up requiring could be less than this, but never more. +*/ +JS_EXPORT size_t JSStringGetMaximumUTF8CStringSize(JSStringRef string); +/*! +@function +@abstract Converts a JavaScript string into a null-terminated UTF8 string, + and copies the result into an external byte buffer. +@param string The source JSString. +@param buffer The destination byte buffer into which to copy a null-terminated + UTF8 representation of string. On return, buffer contains a UTF8 string + representation of string. If bufferSize is too small, buffer will contain only + partial results. If buffer is not at least bufferSize bytes in size, + behavior is undefined. +@param bufferSize The size of the external buffer in bytes. +@result The number of bytes written into buffer (including the null-terminator byte). +*/ +JS_EXPORT size_t JSStringGetUTF8CString(JSStringRef string, char* buffer, size_t bufferSize); + +/*! +@function +@abstract Tests whether two JavaScript strings match. +@param a The first JSString to test. +@param b The second JSString to test. +@result true if the two strings match, otherwise false. +*/ +JS_EXPORT bool JSStringIsEqual(JSStringRef a, JSStringRef b); +/*! +@function +@abstract Tests whether a JavaScript string matches a null-terminated UTF8 string. +@param a The JSString to test. +@param b The null-terminated UTF8 string to test. +@result true if the two strings match, otherwise false. 
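The two UTF-8 functions documented above pair naturally: size the buffer with JSStringGetMaximumUTF8CStringSize, then fill it with JSStringGetUTF8CString. A hedged sketch of that pattern; the helper name toUTF8 is invented for illustration.

// Sketch: convert a JSStringRef to a std::string using the worst-case
// size bound (3 bytes per UTF-16 code unit, plus the terminator).
#include <JavaScriptCore/JavaScriptCore.h>
#include <string>
#include <vector>

static std::string toUTF8(JSStringRef string)
{
    size_t maxSize = JSStringGetMaximumUTF8CStringSize(string); // never too small
    std::vector<char> buffer(maxSize);
    size_t written = JSStringGetUTF8CString(string, buffer.data(), buffer.size());
    // 'written' includes the trailing '\0'; 0 means the conversion failed.
    return written ? std::string(buffer.data(), written - 1) : std::string();
}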
+*/ +JS_EXPORT bool JSStringIsEqualToUTF8CString(JSStringRef a, const char* b); + +#ifdef __cplusplus +} +#endif + +#endif /* JSStringRef_h */ diff --git a/API/JSStringRefBSTR.cpp b/API/JSStringRefBSTR.cpp new file mode 100644 index 0000000..e900d24 --- /dev/null +++ b/API/JSStringRefBSTR.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSStringRefBSTR.h" + +#include "JSStringRef.h" + +JSStringRef JSStringCreateWithBSTR(BSTR string) +{ + return JSStringCreateWithCharacters(string ? string : L"", string ? SysStringLen(string) : 0); +} + +BSTR JSStringCopyBSTR(const JSStringRef string) +{ + return SysAllocStringLen(JSStringGetCharactersPtr(string), JSStringGetLength(string)); +} diff --git a/API/JSStringRefBSTR.h b/API/JSStringRefBSTR.h new file mode 100644 index 0000000..066c68d --- /dev/null +++ b/API/JSStringRefBSTR.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSStringRefBSTR_h +#define JSStringRefBSTR_h + +#include "JSBase.h" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* COM convenience methods */ + +/*! +@function +@abstract Creates a JavaScript string from a BSTR. +@param string The BSTR to copy into the new JSString. +@result A JSString containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT JSStringRef JSStringCreateWithBSTR(const BSTR string); + +/*! +@function +@abstract Creates a BSTR from a JavaScript string. +@param string The JSString to copy into the new BSTR. +@result A BSTR containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT BSTR JSStringCopyBSTR(const JSStringRef string); + +#ifdef __cplusplus +} +#endif + +#endif /* JSStringRefBSTR_h */ diff --git a/API/JSStringRefCF.cpp b/API/JSStringRefCF.cpp new file mode 100644 index 0000000..0587259 --- /dev/null +++ b/API/JSStringRefCF.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSStringRefCF.h" + +#include "APICast.h" +#include "InitializeThreading.h" +#include "JSCJSValue.h" +#include "JSStringRef.h" +#include "OpaqueJSString.h" +#include + +JSStringRef JSStringCreateWithCFString(CFStringRef string) +{ + JSC::initializeThreading(); + + // We cannot use CFIndex here since CFStringGetLength can return values larger than + // it can hold. 
() + size_t length = CFStringGetLength(string); + if (!length) + return &OpaqueJSString::create(reinterpret_cast(""), 0).leakRef(); + + Vector lcharBuffer(length); + CFIndex usedBufferLength; + CFIndex convertedSize = CFStringGetBytes(string, CFRangeMake(0, length), kCFStringEncodingISOLatin1, 0, false, lcharBuffer.data(), length, &usedBufferLength); + if (static_cast(convertedSize) == length && static_cast(usedBufferLength) == length) + return &OpaqueJSString::create(lcharBuffer.data(), length).leakRef(); + + auto buffer = std::make_unique(length); + CFStringGetCharacters(string, CFRangeMake(0, length), buffer.get()); + static_assert(sizeof(UniChar) == sizeof(UChar), "UniChar and UChar must be same size"); + return &OpaqueJSString::create(reinterpret_cast(buffer.get()), length).leakRef(); +} + +CFStringRef JSStringCopyCFString(CFAllocatorRef allocator, JSStringRef string) +{ + if (!string || !string->length()) + return CFSTR(""); + + if (string->is8Bit()) + return CFStringCreateWithBytes(allocator, reinterpret_cast(string->characters8()), string->length(), kCFStringEncodingISOLatin1, false); + + return CFStringCreateWithCharacters(allocator, reinterpret_cast(string->characters16()), string->length()); +} diff --git a/API/JSStringRefCF.h b/API/JSStringRefCF.h new file mode 100644 index 0000000..1e210c7 --- /dev/null +++ b/API/JSStringRefCF.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSStringRefCF_h +#define JSStringRefCF_h + +#include "JSBase.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* CFString convenience methods */ + +/*! +@function +@abstract Creates a JavaScript string from a CFString. +@discussion This function is optimized to take advantage of cases when + CFStringGetCharactersPtr returns a valid pointer. +@param string The CFString to copy into the new JSString. +@result A JSString containing string. Ownership follows the Create Rule. +*/ +JS_EXPORT JSStringRef JSStringCreateWithCFString(CFStringRef string); +/*! +@function +@abstract Creates a CFString from a JavaScript string. +@param alloc The alloc parameter to pass to CFStringCreate. +@param string The JSString to copy into the new CFString. +@result A CFString containing string. 
Ownership follows the Create Rule. +*/ +JS_EXPORT CFStringRef JSStringCopyCFString(CFAllocatorRef alloc, JSStringRef string) CF_RETURNS_RETAINED; + +#ifdef __cplusplus +} +#endif + +#endif /* JSStringRefCF_h */ diff --git a/API/JSStringRefPrivate.h b/API/JSStringRefPrivate.h new file mode 100644 index 0000000..f1db806 --- /dev/null +++ b/API/JSStringRefPrivate.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSStringRefPrivate_h +#define JSStringRefPrivate_h + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +JS_EXPORT JSStringRef JSStringCreateWithCharactersNoCopy(const JSChar* chars, size_t numChars); + +#ifdef __cplusplus +} +#endif + +#endif /* JSStringRefPrivate_h */ diff --git a/API/JSTypedArray.cpp b/API/JSTypedArray.cpp new file mode 100644 index 0000000..14883da --- /dev/null +++ b/API/JSTypedArray.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2015 Dominic Szablewski (dominic@phoboslab.org) + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSTypedArray.h" + +#include "APICast.h" +#include "APIUtils.h" +#include "ClassInfo.h" +#include "Error.h" +#include "JSArrayBufferViewInlines.h" +#include "JSCInlines.h" +#include "JSDataView.h" +#include "JSGenericTypedArrayViewInlines.h" +#include "JSTypedArrays.h" +#include "TypedArrayController.h" +#include + +using namespace JSC; + +// Helper functions. + +inline JSTypedArrayType toJSTypedArrayType(TypedArrayType type) +{ + switch (type) { + case JSC::TypeDataView: + case NotTypedArray: + return kJSTypedArrayTypeNone; + case TypeInt8: + return kJSTypedArrayTypeInt8Array; + case TypeUint8: + return kJSTypedArrayTypeUint8Array; + case TypeUint8Clamped: + return kJSTypedArrayTypeUint8ClampedArray; + case TypeInt16: + return kJSTypedArrayTypeInt16Array; + case TypeUint16: + return kJSTypedArrayTypeUint16Array; + case TypeInt32: + return kJSTypedArrayTypeInt32Array; + case TypeUint32: + return kJSTypedArrayTypeUint32Array; + case TypeFloat32: + return kJSTypedArrayTypeFloat32Array; + case TypeFloat64: + return kJSTypedArrayTypeFloat64Array; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +inline TypedArrayType toTypedArrayType(JSTypedArrayType type) +{ + switch (type) { + case kJSTypedArrayTypeArrayBuffer: + case kJSTypedArrayTypeNone: + return NotTypedArray; + case kJSTypedArrayTypeInt8Array: + return TypeInt8; + case kJSTypedArrayTypeUint8Array: + return TypeUint8; + case kJSTypedArrayTypeUint8ClampedArray: + return TypeUint8Clamped; + case kJSTypedArrayTypeInt16Array: + return TypeInt16; + case kJSTypedArrayTypeUint16Array: + return TypeUint16; + case kJSTypedArrayTypeInt32Array: + return TypeInt32; + case kJSTypedArrayTypeUint32Array: + return TypeUint32; + case kJSTypedArrayTypeFloat32Array: + return TypeFloat32; + case kJSTypedArrayTypeFloat64Array: + return TypeFloat64; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +static JSObject* createTypedArray(ExecState* exec, JSTypedArrayType type, RefPtr&& buffer, size_t offset, size_t length) +{ + VM& vm = exec->vm(); + auto scope = DECLARE_THROW_SCOPE(vm); + JSGlobalObject* globalObject = exec->lexicalGlobalObject(); + if (!buffer) { + throwOutOfMemoryError(exec, scope); + return nullptr; + } + switch (type) { + case kJSTypedArrayTypeInt8Array: + return JSInt8Array::create(exec, globalObject->typedArrayStructure(TypeInt8), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeInt16Array: + return JSInt16Array::create(exec, globalObject->typedArrayStructure(TypeInt16), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeInt32Array: + return JSInt32Array::create(exec, globalObject->typedArrayStructure(TypeInt32), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint8Array: + return JSUint8Array::create(exec, globalObject->typedArrayStructure(TypeUint8), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint8ClampedArray: + return JSUint8ClampedArray::create(exec, globalObject->typedArrayStructure(TypeUint8Clamped), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint16Array: + return 
JSUint16Array::create(exec, globalObject->typedArrayStructure(TypeUint16), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeUint32Array: + return JSUint32Array::create(exec, globalObject->typedArrayStructure(TypeUint32), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeFloat32Array: + return JSFloat32Array::create(exec, globalObject->typedArrayStructure(TypeFloat32), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeFloat64Array: + return JSFloat64Array::create(exec, globalObject->typedArrayStructure(TypeFloat64), WTFMove(buffer), offset, length); + case kJSTypedArrayTypeArrayBuffer: + case kJSTypedArrayTypeNone: + RELEASE_ASSERT_NOT_REACHED(); + } + return nullptr; +} + +// Implementations of the API functions. + +JSTypedArrayType JSValueGetTypedArrayType(JSContextRef ctx, JSValueRef valueRef, JSValueRef*) +{ + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue value = toJS(exec, valueRef); + if (!value.isObject()) + return kJSTypedArrayTypeNone; + JSObject* object = value.getObject(); + + if (jsDynamicCast(object)) + return kJSTypedArrayTypeArrayBuffer; + + return toJSTypedArrayType(object->classInfo()->typedArrayStorageType); +} + +JSObjectRef JSObjectMakeTypedArray(JSContextRef ctx, JSTypedArrayType arrayType, size_t length, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + unsigned elementByteSize = elementSize(toTypedArrayType(arrayType)); + + auto buffer = ArrayBuffer::tryCreate(length, elementByteSize); + JSObject* result = createTypedArray(exec, arrayType, WTFMove(buffer), 0, length); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +JSObjectRef JSObjectMakeTypedArrayWithBytesNoCopy(JSContextRef ctx, JSTypedArrayType arrayType, void* bytes, size_t length, JSTypedArrayBytesDeallocator destructor, void* destructorContext, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + unsigned elementByteSize = elementSize(toTypedArrayType(arrayType)); + + RefPtr buffer = ArrayBuffer::createFromBytes(bytes, length, [=](void* p) { + if (destructor) + destructor(p, destructorContext); + }); + JSObject* result = createTypedArray(exec, arrayType, WTFMove(buffer), 0, length / elementByteSize); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +JSObjectRef JSObjectMakeTypedArrayWithArrayBuffer(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef jsBufferRef, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + JSArrayBuffer* jsBuffer = jsDynamicCast(toJS(jsBufferRef)); + if (!jsBuffer) { + setException(exec, exception, createTypeError(exec, "JSObjectMakeTypedArrayWithArrayBuffer expects buffer to be an Array Buffer object")); + return nullptr; + } + + RefPtr buffer = jsBuffer->impl(); + unsigned elementByteSize = elementSize(toTypedArrayType(arrayType)); + + JSObject* result = createTypedArray(exec, arrayType, WTFMove(buffer), 0, buffer->byteLength() / elementByteSize); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return 
toRef(result); +} + +JSObjectRef JSObjectMakeTypedArrayWithArrayBufferAndOffset(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef jsBufferRef, size_t offset, size_t length, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + if (arrayType == kJSTypedArrayTypeNone || arrayType == kJSTypedArrayTypeArrayBuffer) + return nullptr; + + JSArrayBuffer* jsBuffer = jsDynamicCast(toJS(jsBufferRef)); + if (!jsBuffer) { + setException(exec, exception, createTypeError(exec, "JSObjectMakeTypedArrayWithArrayBuffer expects buffer to be an Array Buffer object")); + return nullptr; + } + + JSObject* result = createTypedArray(exec, arrayType, jsBuffer->impl(), offset, length); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + return toRef(result); +} + +void* JSObjectGetTypedArrayBytesPtr(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(object)) { + ArrayBuffer* buffer = typedArray->possiblySharedBuffer(); + buffer->pinAndLock(); + return buffer->data(); + } + return nullptr; +} + +size_t JSObjectGetTypedArrayLength(JSContextRef, JSObjectRef objectRef, JSValueRef*) +{ + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(object)) + return typedArray->length(); + + return 0; +} + +size_t JSObjectGetTypedArrayByteLength(JSContextRef, JSObjectRef objectRef, JSValueRef*) +{ + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(object)) + return typedArray->length() * elementSize(typedArray->classInfo()->typedArrayStorageType); + + return 0; +} + +size_t JSObjectGetTypedArrayByteOffset(JSContextRef, JSObjectRef objectRef, JSValueRef*) +{ + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(object)) + return typedArray->byteOffset(); + + return 0; +} + +JSObjectRef JSObjectGetTypedArrayBuffer(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* object = toJS(objectRef); + + if (JSArrayBufferView* typedArray = jsDynamicCast(object)) + return toRef(exec->vm().m_typedArrayController->toJS(exec, typedArray->globalObject(), typedArray->possiblySharedBuffer())); + + return nullptr; +} + +JSObjectRef JSObjectMakeArrayBufferWithBytesNoCopy(JSContextRef ctx, void* bytes, size_t byteLength, JSTypedArrayBytesDeallocator bytesDeallocator, void* deallocatorContext, JSValueRef* exception) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + auto buffer = ArrayBuffer::createFromBytes(bytes, byteLength, [=](void* p) { + if (bytesDeallocator) + bytesDeallocator(p, deallocatorContext); + }); + + JSArrayBuffer* jsBuffer = JSArrayBuffer::create(exec->vm(), exec->lexicalGlobalObject()->arrayBufferStructure(ArrayBufferSharingMode::Default), WTFMove(buffer)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return nullptr; + + return toRef(jsBuffer); +} + +void* JSObjectGetArrayBufferBytesPtr(JSContextRef ctx, JSObjectRef objectRef, JSValueRef*) +{ + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* object = toJS(objectRef); + + if (JSArrayBuffer* jsBuffer = jsDynamicCast(object)) { + ArrayBuffer* buffer = jsBuffer->impl(); + buffer->pinAndLock(); + return buffer->data(); + } + return nullptr; +} + +size_t 
JSObjectGetArrayBufferByteLength(JSContextRef, JSObjectRef objectRef, JSValueRef*) +{ + JSObject* object = toJS(objectRef); + + if (JSArrayBuffer* jsBuffer = jsDynamicCast(object)) + return jsBuffer->impl()->byteLength(); + + return 0; +} diff --git a/API/JSTypedArray.h b/API/JSTypedArray.h new file mode 100644 index 0000000..e23b76d --- /dev/null +++ b/API/JSTypedArray.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2015 Dominic Szablewski (dominic@phoboslab.org) + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSTypedArray_h +#define JSTypedArray_h + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------- Typed Array functions -------------- + +/*! + @function + @abstract Creates a JavaScript Typed Array object with the given number of elements. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param length The number of elements to be in the new Typed Array. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef that is a Typed Array with all elements set to zero or NULL if there was an error. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArray(JSContextRef ctx, JSTypedArrayType arrayType, size_t length, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Creates a JavaScript Typed Array object from an existing pointer. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param bytes A pointer to the byte buffer to be used as the backing store of the Typed Array object. + @param byteLength The number of bytes pointed to by the parameter bytes. + @param bytesDeallocator The allocator to use to deallocate the external buffer when the JSTypedArrayData object is deallocated. + @param deallocatorContext A pointer to pass back to the deallocator. 
+ @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef Typed Array whose backing store is the same as the one pointed to by bytes or NULL if there was an error. + @discussion If an exception is thrown during this function the bytesDeallocator will always be called. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArrayWithBytesNoCopy(JSContextRef ctx, JSTypedArrayType arrayType, void* bytes, size_t byteLength, JSTypedArrayBytesDeallocator bytesDeallocator, void* deallocatorContext, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Creates a JavaScript Typed Array object from an existing JavaScript Array Buffer object. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param buffer An Array Buffer object that should be used as the backing store for the created JavaScript Typed Array object. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef that is a Typed Array or NULL if there was an error. The backing store of the Typed Array will be buffer. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArrayWithArrayBuffer(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef buffer, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Creates a JavaScript Typed Array object from an existing JavaScript Array Buffer object with the given offset and length. + @param ctx The execution context to use. + @param arrayType A value identifying the type of array to create. If arrayType is kJSTypedArrayTypeNone or kJSTypedArrayTypeArrayBuffer then NULL will be returned. + @param buffer An Array Buffer object that should be used as the backing store for the created JavaScript Typed Array object. + @param byteOffset The byte offset for the created Typed Array. byteOffset should be aligned with the element size of arrayType. + @param length The number of elements to include in the Typed Array. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef that is a Typed Array or NULL if there was an error. The backing store of the Typed Array will be buffer. + */ +JS_EXPORT JSObjectRef JSObjectMakeTypedArrayWithArrayBufferAndOffset(JSContextRef ctx, JSTypedArrayType arrayType, JSObjectRef buffer, size_t byteOffset, size_t length, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns a temporary pointer to the backing store of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose backing store pointer to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A pointer to the raw data buffer that serves as object's backing store or NULL if object is not a Typed Array object. + @discussion The pointer returned by this function is temporary and is not guaranteed to remain valid across JavaScriptCore API calls. + */ +JS_EXPORT void* JSObjectGetTypedArrayBytesPtr(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*!
+ @function + @abstract Returns the length of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose length to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The length of the Typed Array object or 0 if the object is not a Typed Array object. + */ +JS_EXPORT size_t JSObjectGetTypedArrayLength(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the byte length of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose byte length to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The byte length of the Typed Array object or 0 if the object is not a Typed Array object. + */ +JS_EXPORT size_t JSObjectGetTypedArrayByteLength(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the byte offset of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The Typed Array object whose byte offset to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The byte offset of the Typed Array object or 0 if the object is not a Typed Array object. + */ +JS_EXPORT size_t JSObjectGetTypedArrayByteOffset(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the JavaScript Array Buffer object that is used as the backing of a JavaScript Typed Array object. + @param ctx The execution context to use. + @param object The JSObjectRef whose Typed Array type data pointer to obtain. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef with a JSTypedArrayType of kJSTypedArrayTypeArrayBuffer or NULL if object is not a Typed Array. + */ +JS_EXPORT JSObjectRef JSObjectGetTypedArrayBuffer(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +// ------------- Array Buffer functions ------------- + +/*! + @function + @abstract Creates a JavaScript Array Buffer object from an existing pointer. + @param ctx The execution context to use. + @param bytes A pointer to the byte buffer to be used as the backing store of the Typed Array object. + @param byteLength The number of bytes pointed to by the parameter bytes. + @param bytesDeallocator The allocator to use to deallocate the external buffer when the Typed Array data object is deallocated. + @param deallocatorContext A pointer to pass back to the deallocator. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSObjectRef Array Buffer whose backing store is the same as the one pointed to by bytes or NULL if there was an error. + @discussion If an exception is thrown during this function the bytesDeallocator will always be called. 
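The Typed Array functions declared above are meant to be used together: create a view over memory you already own, then read the element count and the live backing pointer back out. The sketch below is illustrative only and not part of the patch; the names freeBytes and typedArrayExample are made up, and it assumes these declarations are reachable through the umbrella JavaScriptCore/JavaScriptCore.h header.

    #include <JavaScriptCore/JavaScriptCore.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Invoked when the Typed Array below is garbage collected, or if creation fails. */
    static void freeBytes(void* bytes, void* deallocatorContext)
    {
        free(bytes);
    }

    static void typedArrayExample(void)
    {
        JSGlobalContextRef ctx = JSGlobalContextCreate(NULL);
        size_t byteLength = 16;
        uint8_t* bytes = (uint8_t*)calloc(byteLength, 1);

        JSValueRef exception = NULL;
        JSObjectRef array = JSObjectMakeTypedArrayWithBytesNoCopy(ctx, kJSTypedArrayTypeUint8Array, bytes, byteLength, freeBytes, NULL, &exception);
        if (array && !exception) {
            /* For a Uint8Array the element count equals the byte length. */
            size_t length = JSObjectGetTypedArrayLength(ctx, array, &exception);
            uint8_t* live = (uint8_t*)JSObjectGetTypedArrayBytesPtr(ctx, array, &exception);
            if (live && length)
                live[0] = 42; /* visible to script immediately; no copy is made */
        }
        JSGlobalContextRelease(ctx);
    }

Because the bytes are not copied, the deallocator is the only cleanup hook needed; as the documentation above notes, it is also called if creation throws.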
+ */ +JS_EXPORT JSObjectRef JSObjectMakeArrayBufferWithBytesNoCopy(JSContextRef ctx, void* bytes, size_t byteLength, JSTypedArrayBytesDeallocator bytesDeallocator, void* deallocatorContext, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns a pointer to the data buffer that serves as the backing store for a JavaScript Array Buffer object. + @param ctx The execution context to use. + @param object The Array Buffer object whose internal backing store pointer to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A pointer to the raw data buffer that serves as object's backing store or NULL if object is not an Array Buffer object. + @discussion The pointer returned by this function is temporary and is not guaranteed to remain valid across JavaScriptCore API calls. + */ +JS_EXPORT void* JSObjectGetArrayBufferBytesPtr(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/*! + @function + @abstract Returns the number of bytes in a JavaScript Array Buffer object. + @param ctx The execution context to use. + @param object The JS Array Buffer object whose length in bytes to return. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result The number of bytes stored in the Array Buffer object. + */ +JS_EXPORT size_t JSObjectGetArrayBufferByteLength(JSContextRef ctx, JSObjectRef object, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +#ifdef __cplusplus +} +#endif + +#endif /* JSTypedArray_h */ diff --git a/API/JSValue.h b/API/JSValue.h new file mode 100644 index 0000000..1410dd7 --- /dev/null +++ b/API/JSValue.h @@ -0,0 +1,668 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSValue_h +#define JSValue_h + +#if JSC_OBJC_API_ENABLED + +#import + +@class JSContext; + +/*! +@interface +@discussion A JSValue is a reference to a JavaScript value. Every JSValue + originates from a JSContext and holds a strong reference to it. + When a JSValue instance method creates a new JSValue, the new value + originates from the same JSContext.
+ + All JSValue values also originate from a JSVirtualMachine + (available indirectly via the context property). It is an error to pass a + JSValue to a method or property of a JSValue or JSContext originating from a + different JSVirtualMachine. Doing so will raise an Objective-C exception. +*/ +NS_CLASS_AVAILABLE(10_9, 7_0) +@interface JSValue : NSObject + +/*! +@property +@abstract The JSContext that this value originates from. +*/ +@property (readonly, strong) JSContext *context; + +/*! +@methodgroup Creating JavaScript Values +*/ +/*! +@method +@abstract Create a JSValue by converting an Objective-C object. +@discussion The resulting JSValue retains the provided Objective-C object. +@param value The Objective-C object to be converted. +@result The new JSValue. +*/ ++ (JSValue *)valueWithObject:(id)value inContext:(JSContext *)context; + +/*! +@method +@abstract Create a JavaScript value from a BOOL primitive. +@param context The JSContext in which the resulting JSValue will be created. +@result The new JSValue representing the equivalent boolean value. +*/ ++ (JSValue *)valueWithBool:(BOOL)value inContext:(JSContext *)context; + +/*! +@method +@abstract Create a JavaScript value from a double primitive. +@param context The JSContext in which the resulting JSValue will be created. +@result The new JSValue representing the equivalent number value. +*/ ++ (JSValue *)valueWithDouble:(double)value inContext:(JSContext *)context; + +/*! +@method +@abstract Create a JavaScript value from an int32_t primitive. +@param context The JSContext in which the resulting JSValue will be created. +@result The new JSValue representing the equivalent number value. +*/ ++ (JSValue *)valueWithInt32:(int32_t)value inContext:(JSContext *)context; + +/*! +@method +@abstract Create a JavaScript value from a uint32_t primitive. +@param context The JSContext in which the resulting JSValue will be created. +@result The new JSValue representing the equivalent number value. +*/ ++ (JSValue *)valueWithUInt32:(uint32_t)value inContext:(JSContext *)context; + +/*! +@method +@abstract Create a new, empty JavaScript object. +@param context The JSContext in which the resulting object will be created. +@result The new JavaScript object. +*/ ++ (JSValue *)valueWithNewObjectInContext:(JSContext *)context; + +/*! +@method +@abstract Create a new, empty JavaScript array. +@param context The JSContext in which the resulting array will be created. +@result The new JavaScript array. +*/ ++ (JSValue *)valueWithNewArrayInContext:(JSContext *)context; + +/*! +@method +@abstract Create a new JavaScript regular expression object. +@param pattern The regular expression pattern. +@param flags The regular expression flags. +@param context The JSContext in which the resulting regular expression object will be created. +@result The new JavaScript regular expression object. +*/ ++ (JSValue *)valueWithNewRegularExpressionFromPattern:(NSString *)pattern flags:(NSString *)flags inContext:(JSContext *)context; + +/*! +@method +@abstract Create a new JavaScript error object. +@param message The error message. +@param context The JSContext in which the resulting error object will be created. +@result The new JavaScript error object. +*/ ++ (JSValue *)valueWithNewErrorFromMessage:(NSString *)message inContext:(JSContext *)context; + +/*! +@method +@abstract Create the JavaScript value null. +@param context The JSContext to which the resulting JSValue belongs. +@result The JSValue representing the JavaScript value null.
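A minimal sketch of these creation methods in use (illustrative only, not part of the patch; it assumes ARC and creates its own JSContext):

    JSContext *context = [[JSContext alloc] init];
    JSValue *number  = [JSValue valueWithDouble:42.0 inContext:context];
    JSValue *flag    = [JSValue valueWithBool:YES inContext:context];
    JSValue *object  = [JSValue valueWithNewObjectInContext:context];
    JSValue *matcher = [JSValue valueWithNewRegularExpressionFromPattern:@"\\d+" flags:@"g" inContext:context];
    JSValue *failure = [JSValue valueWithNewErrorFromMessage:@"something went wrong" inContext:context];
    JSValue *wrapped = [JSValue valueWithObject:@{ @"answer" : @42 } inContext:context]; // converted per the rules documented below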
+*/ ++ (JSValue *)valueWithNullInContext:(JSContext *)context; + +/*! +@method +@abstract Create the JavaScript value undefined. +@param context The JSContext to which the resulting JSValue belongs. +@result The JSValue representing the JavaScript value undefined. +*/ ++ (JSValue *)valueWithUndefinedInContext:(JSContext *)context; + +/*! +@methodgroup Converting to Objective-C Types +@discussion When converting between JavaScript values and Objective-C objects a copy is + performed. Values of types listed below are copied to the corresponding + types on conversion in each direction. For NSDictionaries, entries in the + dictionary that are keyed by strings are copied onto a JavaScript object. + For dictionaries and arrays, conversion is recursive, with the same object + conversion being applied to all entries in the collection. + +
+@textblock
+   Objective-C type  |   JavaScript type
+ --------------------+---------------------
+         nil         |     undefined
+        NSNull       |        null
+       NSString      |       string
+       NSNumber      |   number, boolean
+     NSDictionary    |   Object object
+       NSArray       |    Array object
+        NSDate       |     Date object
+       NSBlock (1)   |   Function object (1)
+          id (2)     |   Wrapper object (2)
+        Class (3)    | Constructor object (3)
+@/textblock
+
+ + (1) Instances of NSBlock with supported arguments types will be presented to + JavaScript as a callable Function object. For more information on supported + argument types see JSExport.h. If a JavaScript Function originating from an + Objective-C block is converted back to an Objective-C object the block will + be returned. All other JavaScript functions will be converted in the same + manner as a JavaScript object of type Object. + + (2) For Objective-C instances that do not derive from the set of types listed + above, a wrapper object to provide a retaining handle to the Objective-C + instance from JavaScript. For more information on these wrapper objects, see + JSExport.h. When a JavaScript wrapper object is converted back to Objective-C + the Objective-C instance being retained by the wrapper is returned. + + (3) For Objective-C Class objects a constructor object containing exported + class methods will be returned. See JSExport.h for more information on + constructor objects. + + For all methods taking arguments of type id, arguments will be converted + into a JavaScript value according to the above conversion. +*/ +/*! +@method +@abstract Convert this JSValue to an Objective-C object. +@discussion The JSValue is converted to an Objective-C object according + to the conversion rules specified above. +@result The Objective-C representation of this JSValue. +*/ +- (id)toObject; + +/*! +@method +@abstract Convert a JSValue to an Objective-C object of a specific class. +@discussion The JSValue is converted to an Objective-C object of the specified Class. + If the result is not of the specified Class then nil will be returned. +@result An Objective-C object of the specified Class or nil. +*/ +- (id)toObjectOfClass:(Class)expectedClass; + +/*! +@method +@abstract Convert a JSValue to a boolean. +@discussion The JSValue is converted to a boolean according to the rules specified + by the JavaScript language. +@result The boolean result of the conversion. +*/ +- (BOOL)toBool; + +/*! +@method +@abstract Convert a JSValue to a double. +@discussion The JSValue is converted to a number according to the rules specified + by the JavaScript language. +@result The double result of the conversion. +*/ +- (double)toDouble; + +/*! +@method +@abstract Convert a JSValue to an int32_t. +@discussion The JSValue is converted to an integer according to the rules specified + by the JavaScript language. +@result The int32_t result of the conversion. +*/ +- (int32_t)toInt32; + +/*! +@method +@abstract Convert a JSValue to a uint32_t. +@discussion The JSValue is converted to an integer according to the rules specified + by the JavaScript language. +@result The uint32_t result of the conversion. +*/ +- (uint32_t)toUInt32; + +/*! +@method +@abstract Convert a JSValue to a NSNumber. +@discussion If the JSValue represents a boolean, a NSNumber value of YES or NO + will be returned. For all other types the value will be converted to a number according + to the rules specified by the JavaScript language. +@result The NSNumber result of the conversion. +*/ +- (NSNumber *)toNumber; + +/*! +@method +@abstract Convert a JSValue to a NSString. +@discussion The JSValue is converted to a string according to the rules specified + by the JavaScript language. +@result The NSString containing the result of the conversion. +*/ +- (NSString *)toString; + +/*! +@method +@abstract Convert a JSValue to a NSDate. 
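A short round trip makes the conversion methods above concrete (illustrative only, not part of the patch; it assumes an existing JSContext named context and uses JSContext's evaluateScript: from the companion header):

    JSValue *pi = [context evaluateScript:@"Math.PI"];
    double asDouble   = [pi toDouble];   // 3.14159...
    int32_t asInt     = [pi toInt32];    // 3, following JavaScript's ToInt32 truncation
    NSNumber *asBoxed = [pi toNumber];   // an NSNumber wrapping the same number
    NSString *asText  = [pi toString];   // @"3.141592653589793"

    BOOL ok = [[context evaluateScript:@"1 < 2"] toBool]; // YES

    id dict = [[context evaluateScript:@"({ a: 1 })"] toObjectOfClass:[NSDictionary class]]; // an NSDictionary copy, or nil on a class mismatch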
+@discussion The value is converted to a number representing a time interval + since 1970 which is then used to create a new NSDate instance. +@result The NSDate created using the converted time interval. +*/ +- (NSDate *)toDate; + +/*! +@method +@abstract Convert a JSValue to a NSArray. +@discussion If the value is null or undefined then nil is returned. + If the value is not an object then a JavaScript TypeError will be thrown. + The property length is read from the object, converted to an unsigned + integer, and an NSArray of this size is allocated. Properties corresponding + to indicies within the array bounds will be copied to the array, with + JSValues converted to equivalent Objective-C objects as specified. +@result The NSArray containing the recursively converted contents of the + converted JavaScript array. +*/ +- (NSArray *)toArray; + +/*! +@method +@abstract Convert a JSValue to a NSDictionary. +@discussion If the value is null or undefined then nil is returned. + If the value is not an object then a JavaScript TypeError will be thrown. + All enumerable properties of the object are copied to the dictionary, with + JSValues converted to equivalent Objective-C objects as specified. +@result The NSDictionary containing the recursively converted contents of + the converted JavaScript object. +*/ +- (NSDictionary *)toDictionary; + +/*! +@methodgroup Accessing Properties +*/ +/*! +@method +@abstract Access a property of a JSValue. +@result The JSValue for the requested property or the JSValue undefined + if the property does not exist. +*/ +- (JSValue *)valueForProperty:(NSString *)property; + +/*! +@method +@abstract Set a property on a JSValue. +*/ +- (void)setValue:(id)value forProperty:(NSString *)property; + +/*! +@method +@abstract Delete a property from a JSValue. +@result YES if deletion is successful, NO otherwise. +*/ +- (BOOL)deleteProperty:(NSString *)property; + +/*! +@method +@abstract Check if a JSValue has a property. +@discussion This method has the same function as the JavaScript operator in. +@result Returns YES if property is present on the value. +*/ +- (BOOL)hasProperty:(NSString *)property; + +/*! +@method +@abstract Define properties with custom descriptors on JSValues. +@discussion This method may be used to create a data or accessor property on an object. + This method operates in accordance with the Object.defineProperty method in the + JavaScript language. +*/ +- (void)defineProperty:(NSString *)property descriptor:(id)descriptor; + +/*! +@method +@abstract Access an indexed (numerical) property on a JSValue. +@result The JSValue for the property at the specified index. + Returns the JavaScript value undefined if no property exists at that index. +*/ +- (JSValue *)valueAtIndex:(NSUInteger)index; + +/*! +@method +@abstract Set an indexed (numerical) property on a JSValue. +@discussion For JSValues that are JavaScript arrays, indices greater than + UINT_MAX - 1 will not affect the length of the array. +*/ +- (void)setValue:(id)value atIndex:(NSUInteger)index; + +/*! +@functiongroup Checking JavaScript Types +*/ + +/*! +@property +@abstract Check if a JSValue corresponds to the JavaScript value undefined. +*/ +@property (readonly) BOOL isUndefined; + +/*! +@property +@abstract Check if a JSValue corresponds to the JavaScript value null. +*/ +@property (readonly) BOOL isNull; + +/*! +@property +@abstract Check if a JSValue is a boolean. +*/ +@property (readonly) BOOL isBoolean; + +/*! +@property +@abstract Check if a JSValue is a number. 
+@discussion In JavaScript, there is no differentiation between types of numbers. + Semantically all numbers behave like doubles except in special cases like bit + operations. +*/ +@property (readonly) BOOL isNumber; + +/*! +@property +@abstract Check if a JSValue is a string. +*/ +@property (readonly) BOOL isString; + +/*! +@property +@abstract Check if a JSValue is an object. +*/ +@property (readonly) BOOL isObject; + +/*! +@property +@abstract Check if a JSValue is an array. +*/ +@property (readonly) BOOL isArray NS_AVAILABLE(10_11, 9_0); + +/*! +@property +@abstract Check if a JSValue is a date. +*/ +@property (readonly) BOOL isDate NS_AVAILABLE(10_11, 9_0); + +/*! +@method +@abstract Compare two JSValues using JavaScript's === operator. +*/ +- (BOOL)isEqualToObject:(id)value; + +/*! +@method +@abstract Compare two JSValues using JavaScript's == operator. +*/ +- (BOOL)isEqualWithTypeCoercionToObject:(id)value; + +/*! +@method +@abstract Check if a JSValue is an instance of another object. +@discussion This method has the same function as the JavaScript operator instanceof. + If an object other than a JSValue is passed, it will first be converted according to + the aforementioned rules. +*/ +- (BOOL)isInstanceOf:(id)value; + +/*! +@methodgroup Calling Functions and Constructors +*/ +/*! +@method +@abstract Invoke a JSValue as a function. +@discussion In JavaScript, if a function doesn't explicitly return a value then it + implicitly returns the JavaScript value undefined. +@param arguments The arguments to pass to the function. +@result The return value of the function call. +*/ +- (JSValue *)callWithArguments:(NSArray *)arguments; + +/*! +@method +@abstract Invoke a JSValue as a constructor. +@discussion This is equivalent to using the new syntax in JavaScript. +@param arguments The arguments to pass to the constructor. +@result The return value of the constructor call. +*/ +- (JSValue *)constructWithArguments:(NSArray *)arguments; + +/*! +@method +@abstract Invoke a method on a JSValue. +@discussion Accesses the property named method from this value and + calls the resulting value as a function, passing this JSValue as the this + value along with the specified arguments. +@param method The name of the method to be invoked. +@param arguments The arguments to pass to the method. +@result The return value of the method call. +*/ +- (JSValue *)invokeMethod:(NSString *)method withArguments:(NSArray *)arguments; + +@end + +/*! +@category +@discussion Objective-C methods exported to JavaScript may have argument and/or return + values of struct types, provided that conversion to and from the struct is + supported by JSValue. Support is provided for any types where JSValue + contains both a class method valueWith:inContext:, and and instance + method to- where the string in these selector names match, + with the first argument to the former being of the same struct type as the + return type of the latter. + Support is provided for structs of type CGPoint, NSRange, CGRect and CGSize. +*/ +@interface JSValue (StructSupport) + +/*! +@method +@abstract Create a JSValue from a CGPoint. +@result A newly allocated JavaScript object containing properties + named x and y, with values from the CGPoint. +*/ ++ (JSValue *)valueWithPoint:(CGPoint)point inContext:(JSContext *)context; + +/*! +@method +@abstract Create a JSValue from a NSRange. +@result A newly allocated JavaScript object containing properties + named location and length, with values from the NSRange. 
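The calling methods above combine naturally with the property-access methods from the previous group; a brief sketch (illustrative only, not part of the patch, reusing an existing JSContext named context):

    [context evaluateScript:@"function add(a, b) { return a + b; }"];
    JSValue *add = [context.globalObject valueForProperty:@"add"];
    int32_t sum = [[add callWithArguments:@[ @2, @3 ]] toInt32]; // 5

    JSValue *dateConstructor = [context.globalObject valueForProperty:@"Date"];
    JSValue *epoch = [dateConstructor constructWithArguments:@[ @0 ]]; // same as "new Date(0)"
    BOOL isDate = [epoch isInstanceOf:dateConstructor];               // YES

    JSValue *greeting = [JSValue valueWithObject:@"hello" inContext:context];
    NSString *upper = [[greeting invokeMethod:@"toUpperCase" withArguments:@[]] toString]; // @"HELLO"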
+*/ ++ (JSValue *)valueWithRange:(NSRange)range inContext:(JSContext *)context; + +/*! +@method +@abstract +Create a JSValue from a CGRect. +@result A newly allocated JavaScript object containing properties + named x, y, width, and height, with values from the CGRect. +*/ ++ (JSValue *)valueWithRect:(CGRect)rect inContext:(JSContext *)context; + +/*! +@method +@abstract Create a JSValue from a CGSize. +@result A newly allocated JavaScript object containing properties + named width and height, with values from the CGSize. +*/ ++ (JSValue *)valueWithSize:(CGSize)size inContext:(JSContext *)context; + +/*! +@method +@abstract Convert a JSValue to a CGPoint. +@discussion Reads the properties named x and y from + this JSValue, and converts the results to double. +@result The new CGPoint. +*/ +- (CGPoint)toPoint; + +/*! +@method +@abstract Convert a JSValue to an NSRange. +@discussion Reads the properties named location and + length from this JSValue and converts the results to double. +@result The new NSRange. +*/ +- (NSRange)toRange; + +/*! +@method +@abstract Convert a JSValue to a CGRect. +@discussion Reads the properties named x, y, + width, and height from this JSValue and converts the results to double. +@result The new CGRect. +*/ +- (CGRect)toRect; + +/*! +@method +@abstract Convert a JSValue to a CGSize. +@discussion Reads the properties named width and + height from this JSValue and converts the results to double. +@result The new CGSize. +*/ +- (CGSize)toSize; + +@end + +/*! +@category +@discussion Instances of JSValue implement the following methods in order to enable + support for subscript access by key and index, for example: + +@textblock + JSValue *objectA, *objectB; + JSValue *v1 = object[@"X"]; // Get value for property "X" from 'object'. + JSValue *v2 = object[42]; // Get value for index 42 from 'object'. + object[@"Y"] = v1; // Assign 'v1' to property "Y" of 'object'. + object[101] = v2; // Assign 'v2' to index 101 of 'object'. +@/textblock + + An object key passed as a subscript will be converted to a JavaScript value, + and then the value converted to a string used as a property name. +*/ +@interface JSValue (SubscriptSupport) + +- (JSValue *)objectForKeyedSubscript:(id)key; +- (JSValue *)objectAtIndexedSubscript:(NSUInteger)index; +- (void)setObject:(id)object forKeyedSubscript:(NSObject *)key; +- (void)setObject:(id)object atIndexedSubscript:(NSUInteger)index; + +@end + +/*! +@category +@discussion These functions are for bridging between the C API and the Objective-C API. +*/ +@interface JSValue (JSValueRefSupport) + +/*! +@method +@abstract Creates a JSValue, wrapping its C API counterpart. +@result The Objective-C API equivalent of the specified JSValueRef. +*/ ++ (JSValue *)valueWithJSValueRef:(JSValueRef)value inContext:(JSContext *)context; + +/*! +@property +@abstract Returns the C API counterpart wrapped by a JSContext. +@result The C API equivalent of this JSValue. +*/ +@property (readonly) JSValueRef JSValueRef; +@end + +#ifdef __cplusplus +extern "C" { +#endif + +/*! +@group Property Descriptor Constants +@discussion These keys may assist in creating a property descriptor for use with the + defineProperty method on JSValue. + Property descriptors must fit one of three descriptions: + + Data Descriptor: + - A descriptor containing one or both of the keys value and writable, + and optionally containing one or both of the keys enumerable and + configurable. A data descriptor may not contain either the get or + set key. 
+ A data descriptor may be used to create or modify the attributes of a + data property on an object (replacing any existing accessor property). + + Accessor Descriptor: + - A descriptor containing one or both of the keys get and set, and + optionally containing one or both of the keys enumerable and + configurable. An accessor descriptor may not contain either the value + or writable key. + An accessor descriptor may be used to create or modify the attributes of + an accessor property on an object (replacing any existing data property). + + Generic Descriptor: + - A descriptor containing one or both of the keys enumerable and + configurable. A generic descriptor may not contain any of the keys + value, writable, get, or set. + A generic descriptor may be used to modify the attributes of an existing + data or accessor property, or to create a new data property. +*/ +/*! +@const +*/ +JS_EXPORT extern NSString * const JSPropertyDescriptorWritableKey; +/*! +@const +*/ +JS_EXPORT extern NSString * const JSPropertyDescriptorEnumerableKey; +/*! +@const +*/ +JS_EXPORT extern NSString * const JSPropertyDescriptorConfigurableKey; +/*! +@const +*/ +JS_EXPORT extern NSString * const JSPropertyDescriptorValueKey; +/*! +@const +*/ +JS_EXPORT extern NSString * const JSPropertyDescriptorGetKey; +/*! +@const +*/ +JS_EXPORT extern NSString * const JSPropertyDescriptorSetKey; + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif + +#endif // JSValue_h diff --git a/API/JSValue.mm b/API/JSValue.mm new file mode 100644 index 0000000..bab2862 --- /dev/null +++ b/API/JSValue.mm @@ -0,0 +1,1181 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" + +#import "APICast.h" +#import "DateInstance.h" +#import "Error.h" +#import "Exception.h" +#import "JavaScriptCore.h" +#import "JSContextInternal.h" +#import "JSVirtualMachineInternal.h" +#import "JSValueInternal.h" +#import "JSWrapperMap.h" +#import "ObjcRuntimeExtras.h" +#import "JSCInlines.h" +#import "JSCJSValue.h" +#import "Strong.h" +#import "StrongInlines.h" +#import +#import +#import +#import +#import +#import +#import + +#if ENABLE(REMOTE_INSPECTOR) +#import "CallFrame.h" +#import "JSGlobalObject.h" +#import "JSGlobalObjectInspectorController.h" +#endif + +#if JSC_OBJC_API_ENABLED + +NSString * const JSPropertyDescriptorWritableKey = @"writable"; +NSString * const JSPropertyDescriptorEnumerableKey = @"enumerable"; +NSString * const JSPropertyDescriptorConfigurableKey = @"configurable"; +NSString * const JSPropertyDescriptorValueKey = @"value"; +NSString * const JSPropertyDescriptorGetKey = @"get"; +NSString * const JSPropertyDescriptorSetKey = @"set"; + +@implementation JSValue { + JSValueRef m_value; +} + +- (JSValueRef)JSValueRef +{ + return m_value; +} + ++ (JSValue *)valueWithObject:(id)value inContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:objectToValue(context, value) inContext:context]; +} + ++ (JSValue *)valueWithBool:(BOOL)value inContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSValueMakeBoolean([context JSGlobalContextRef], value) inContext:context]; +} + ++ (JSValue *)valueWithDouble:(double)value inContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSValueMakeNumber([context JSGlobalContextRef], value) inContext:context]; +} + ++ (JSValue *)valueWithInt32:(int32_t)value inContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSValueMakeNumber([context JSGlobalContextRef], value) inContext:context]; +} + ++ (JSValue *)valueWithUInt32:(uint32_t)value inContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSValueMakeNumber([context JSGlobalContextRef], value) inContext:context]; +} + ++ (JSValue *)valueWithNewObjectInContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSObjectMake([context JSGlobalContextRef], 0, 0) inContext:context]; +} + ++ (JSValue *)valueWithNewArrayInContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSObjectMakeArray([context JSGlobalContextRef], 0, NULL, 0) inContext:context]; +} + ++ (JSValue *)valueWithNewRegularExpressionFromPattern:(NSString *)pattern flags:(NSString *)flags inContext:(JSContext *)context +{ + JSStringRef patternString = JSStringCreateWithCFString((CFStringRef)pattern); + JSStringRef flagsString = JSStringCreateWithCFString((CFStringRef)flags); + JSValueRef arguments[2] = { JSValueMakeString([context JSGlobalContextRef], patternString), JSValueMakeString([context JSGlobalContextRef], flagsString) }; + JSStringRelease(patternString); + JSStringRelease(flagsString); + + return [JSValue valueWithJSValueRef:JSObjectMakeRegExp([context JSGlobalContextRef], 2, arguments, 0) inContext:context]; +} + ++ (JSValue *)valueWithNewErrorFromMessage:(NSString *)message inContext:(JSContext *)context +{ + JSStringRef string = JSStringCreateWithCFString((CFStringRef)message); + JSValueRef argument = JSValueMakeString([context JSGlobalContextRef], string); + JSStringRelease(string); + + return [JSValue valueWithJSValueRef:JSObjectMakeError([context JSGlobalContextRef], 1, &argument, 0) inContext:context]; +} + ++ (JSValue *)valueWithNullInContext:(JSContext *)context +{ + return 
[JSValue valueWithJSValueRef:JSValueMakeNull([context JSGlobalContextRef]) inContext:context]; +} + ++ (JSValue *)valueWithUndefinedInContext:(JSContext *)context +{ + return [JSValue valueWithJSValueRef:JSValueMakeUndefined([context JSGlobalContextRef]) inContext:context]; +} + +- (id)toObject +{ + return valueToObject(_context, m_value); +} + +- (id)toObjectOfClass:(Class)expectedClass +{ + id result = [self toObject]; + return [result isKindOfClass:expectedClass] ? result : nil; +} + +- (BOOL)toBool +{ + return JSValueToBoolean([_context JSGlobalContextRef], m_value); +} + +- (double)toDouble +{ + JSValueRef exception = 0; + double result = JSValueToNumber([_context JSGlobalContextRef], m_value, &exception); + if (exception) { + [_context notifyException:exception]; + return std::numeric_limits::quiet_NaN(); + } + + return result; +} + +- (int32_t)toInt32 +{ + return JSC::toInt32([self toDouble]); +} + +- (uint32_t)toUInt32 +{ + return JSC::toUInt32([self toDouble]); +} + +- (NSNumber *)toNumber +{ + JSValueRef exception = 0; + id result = valueToNumber([_context JSGlobalContextRef], m_value, &exception); + if (exception) + [_context notifyException:exception]; + return result; +} + +- (NSString *)toString +{ + JSValueRef exception = 0; + id result = valueToString([_context JSGlobalContextRef], m_value, &exception); + if (exception) + [_context notifyException:exception]; + return result; +} + +- (NSDate *)toDate +{ + JSValueRef exception = 0; + id result = valueToDate([_context JSGlobalContextRef], m_value, &exception); + if (exception) + [_context notifyException:exception]; + return result; +} + +- (NSArray *)toArray +{ + JSValueRef exception = 0; + id result = valueToArray([_context JSGlobalContextRef], m_value, &exception); + if (exception) + [_context notifyException:exception]; + return result; +} + +- (NSDictionary *)toDictionary +{ + JSValueRef exception = 0; + id result = valueToDictionary([_context JSGlobalContextRef], m_value, &exception); + if (exception) + [_context notifyException:exception]; + return result; +} + +- (JSValue *)valueForProperty:(NSString *)propertyName +{ + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSStringRef name = JSStringCreateWithCFString((CFStringRef)propertyName); + JSValueRef result = JSObjectGetProperty([_context JSGlobalContextRef], object, name, &exception); + JSStringRelease(name); + if (exception) + return [_context valueFromNotifyException:exception]; + + return [JSValue valueWithJSValueRef:result inContext:_context]; +} + +- (void)setValue:(id)value forProperty:(NSString *)propertyName +{ + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) { + [_context notifyException:exception]; + return; + } + + JSStringRef name = JSStringCreateWithCFString((CFStringRef)propertyName); + JSObjectSetProperty([_context JSGlobalContextRef], object, name, objectToValue(_context, value), 0, &exception); + JSStringRelease(name); + if (exception) { + [_context notifyException:exception]; + return; + } +} + +- (BOOL)deleteProperty:(NSString *)propertyName +{ + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context boolFromNotifyException:exception]; + + JSStringRef name = JSStringCreateWithCFString((CFStringRef)propertyName); + BOOL 
result = JSObjectDeleteProperty([_context JSGlobalContextRef], object, name, &exception); + JSStringRelease(name); + if (exception) + return [_context boolFromNotifyException:exception]; + + return result; +} + +- (BOOL)hasProperty:(NSString *)propertyName +{ + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context boolFromNotifyException:exception]; + + JSStringRef name = JSStringCreateWithCFString((CFStringRef)propertyName); + BOOL result = JSObjectHasProperty([_context JSGlobalContextRef], object, name); + JSStringRelease(name); + return result; +} + +- (void)defineProperty:(NSString *)property descriptor:(id)descriptor +{ + [[_context globalObject][@"Object"] invokeMethod:@"defineProperty" withArguments:@[ self, property, descriptor ]]; +} + +- (JSValue *)valueAtIndex:(NSUInteger)index +{ + // Properties that are higher than an unsigned value can hold are converted to a double then inserted as a normal property. + // Indices that are bigger than the max allowed index size (UINT_MAX - 1) will be handled internally in get(). + if (index != (unsigned)index) + return [self valueForProperty:[[JSValue valueWithDouble:index inContext:_context] toString]]; + + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSValueRef result = JSObjectGetPropertyAtIndex([_context JSGlobalContextRef], object, (unsigned)index, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + return [JSValue valueWithJSValueRef:result inContext:_context]; +} + +- (void)setValue:(id)value atIndex:(NSUInteger)index +{ + // Properties that are higher than an unsigned value can hold are converted to a double, then inserted as a normal property. + // Indices that are bigger than the max allowed index size (UINT_MAX - 1) will be handled internally in putByIndex(). 
+ if (index != (unsigned)index) + return [self setValue:value forProperty:[[JSValue valueWithDouble:index inContext:_context] toString]]; + + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) { + [_context notifyException:exception]; + return; + } + + JSObjectSetPropertyAtIndex([_context JSGlobalContextRef], object, (unsigned)index, objectToValue(_context, value), &exception); + if (exception) { + [_context notifyException:exception]; + return; + } +} + +- (BOOL)isUndefined +{ + return JSValueIsUndefined([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isNull +{ + return JSValueIsNull([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isBoolean +{ + return JSValueIsBoolean([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isNumber +{ + return JSValueIsNumber([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isString +{ + return JSValueIsString([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isObject +{ + return JSValueIsObject([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isArray +{ + return JSValueIsArray([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isDate +{ + return JSValueIsDate([_context JSGlobalContextRef], m_value); +} + +- (BOOL)isEqualToObject:(id)value +{ + return JSValueIsStrictEqual([_context JSGlobalContextRef], m_value, objectToValue(_context, value)); +} + +- (BOOL)isEqualWithTypeCoercionToObject:(id)value +{ + JSValueRef exception = 0; + BOOL result = JSValueIsEqual([_context JSGlobalContextRef], m_value, objectToValue(_context, value), &exception); + if (exception) + return [_context boolFromNotifyException:exception]; + + return result; +} + +- (BOOL)isInstanceOf:(id)value +{ + JSValueRef exception = 0; + JSObjectRef constructor = JSValueToObject([_context JSGlobalContextRef], objectToValue(_context, value), &exception); + if (exception) + return [_context boolFromNotifyException:exception]; + + BOOL result = JSValueIsInstanceOfConstructor([_context JSGlobalContextRef], m_value, constructor, &exception); + if (exception) + return [_context boolFromNotifyException:exception]; + + return result; +} + +- (JSValue *)callWithArguments:(NSArray *)argumentArray +{ + NSUInteger argumentCount = [argumentArray count]; + JSValueRef arguments[argumentCount]; + for (unsigned i = 0; i < argumentCount; ++i) + arguments[i] = objectToValue(_context, [argumentArray objectAtIndex:i]); + + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSValueRef result = JSObjectCallAsFunction([_context JSGlobalContextRef], object, 0, argumentCount, arguments, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + return [JSValue valueWithJSValueRef:result inContext:_context]; +} + +- (JSValue *)constructWithArguments:(NSArray *)argumentArray +{ + NSUInteger argumentCount = [argumentArray count]; + JSValueRef arguments[argumentCount]; + for (unsigned i = 0; i < argumentCount; ++i) + arguments[i] = objectToValue(_context, [argumentArray objectAtIndex:i]); + + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSObjectRef result = JSObjectCallAsConstructor([_context JSGlobalContextRef], object, argumentCount, arguments, &exception); + if (exception) + return 
[_context valueFromNotifyException:exception]; + + return [JSValue valueWithJSValueRef:result inContext:_context]; +} + +- (JSValue *)invokeMethod:(NSString *)method withArguments:(NSArray *)arguments +{ + NSUInteger argumentCount = [arguments count]; + JSValueRef argumentArray[argumentCount]; + for (unsigned i = 0; i < argumentCount; ++i) + argumentArray[i] = objectToValue(_context, [arguments objectAtIndex:i]); + + JSValueRef exception = 0; + JSObjectRef thisObject = JSValueToObject([_context JSGlobalContextRef], m_value, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSStringRef name = JSStringCreateWithCFString((CFStringRef)method); + JSValueRef function = JSObjectGetProperty([_context JSGlobalContextRef], thisObject, name, &exception); + JSStringRelease(name); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSObjectRef object = JSValueToObject([_context JSGlobalContextRef], function, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + JSValueRef result = JSObjectCallAsFunction([_context JSGlobalContextRef], object, thisObject, argumentCount, argumentArray, &exception); + if (exception) + return [_context valueFromNotifyException:exception]; + + return [JSValue valueWithJSValueRef:result inContext:_context]; +} + +@end + +@implementation JSValue(StructSupport) + +- (CGPoint)toPoint +{ + return (CGPoint){ + static_cast([self[@"x"] toDouble]), + static_cast([self[@"y"] toDouble]) + }; +} + +- (NSRange)toRange +{ + return (NSRange){ + [[self[@"location"] toNumber] unsignedIntegerValue], + [[self[@"length"] toNumber] unsignedIntegerValue] + }; +} + +- (CGRect)toRect +{ + return (CGRect){ + [self toPoint], + [self toSize] + }; +} + +- (CGSize)toSize +{ + return (CGSize){ + static_cast([self[@"width"] toDouble]), + static_cast([self[@"height"] toDouble]) + }; +} + ++ (JSValue *)valueWithPoint:(CGPoint)point inContext:(JSContext *)context +{ + return [JSValue valueWithObject:@{ + @"x":@(point.x), + @"y":@(point.y) + } inContext:context]; +} + ++ (JSValue *)valueWithRange:(NSRange)range inContext:(JSContext *)context +{ + return [JSValue valueWithObject:@{ + @"location":@(range.location), + @"length":@(range.length) + } inContext:context]; +} + ++ (JSValue *)valueWithRect:(CGRect)rect inContext:(JSContext *)context +{ + return [JSValue valueWithObject:@{ + @"x":@(rect.origin.x), + @"y":@(rect.origin.y), + @"width":@(rect.size.width), + @"height":@(rect.size.height) + } inContext:context]; +} + ++ (JSValue *)valueWithSize:(CGSize)size inContext:(JSContext *)context +{ + return [JSValue valueWithObject:@{ + @"width":@(size.width), + @"height":@(size.height) + } inContext:context]; +} + +@end + +@implementation JSValue(SubscriptSupport) + +- (JSValue *)objectForKeyedSubscript:(id)key +{ + if (![key isKindOfClass:[NSString class]]) { + key = [[JSValue valueWithObject:key inContext:_context] toString]; + if (!key) + return [JSValue valueWithUndefinedInContext:_context]; + } + + return [self valueForProperty:(NSString *)key]; +} + +- (JSValue *)objectAtIndexedSubscript:(NSUInteger)index +{ + return [self valueAtIndex:index]; +} + +- (void)setObject:(id)object forKeyedSubscript:(NSObject *)key +{ + if (![key isKindOfClass:[NSString class]]) { + key = [[JSValue valueWithObject:key inContext:_context] toString]; + if (!key) + return; + } + + [self setValue:object forProperty:(NSString *)key]; +} + +- (void)setObject:(id)object atIndexedSubscript:(NSUInteger)index +{ + [self setValue:object 
atIndex:index]; +} + +@end + +inline bool isDate(JSObjectRef object, JSGlobalContextRef context) +{ + JSC::JSLockHolder locker(toJS(context)); + return toJS(object)->inherits(JSC::DateInstance::info()); +} + +inline bool isArray(JSObjectRef object, JSGlobalContextRef context) +{ + JSC::JSLockHolder locker(toJS(context)); + return toJS(object)->inherits(JSC::JSArray::info()); +} + +@implementation JSValue(Internal) + +enum ConversionType { + ContainerNone, + ContainerArray, + ContainerDictionary +}; + +class JSContainerConvertor { +public: + struct Task { + JSValueRef js; + id objc; + ConversionType type; + }; + + JSContainerConvertor(JSGlobalContextRef context) + : m_context(context) + { + } + + id convert(JSValueRef property); + void add(Task); + Task take(); + bool isWorkListEmpty() const { return !m_worklist.size(); } + +private: + JSGlobalContextRef m_context; + HashMap m_objectMap; + Vector m_worklist; + Vector> m_jsValues; +}; + +inline id JSContainerConvertor::convert(JSValueRef value) +{ + HashMap::iterator iter = m_objectMap.find(value); + if (iter != m_objectMap.end()) + return iter->value; + + Task result = valueToObjectWithoutCopy(m_context, value); + if (result.js) + add(result); + return result.objc; +} + +void JSContainerConvertor::add(Task task) +{ + JSC::ExecState* exec = toJS(m_context); + m_jsValues.append(JSC::Strong(exec->vm(), toJSForGC(exec, task.js))); + m_objectMap.add(task.js, task.objc); + if (task.type != ContainerNone) + m_worklist.append(task); +} + +JSContainerConvertor::Task JSContainerConvertor::take() +{ + ASSERT(!isWorkListEmpty()); + Task last = m_worklist.last(); + m_worklist.removeLast(); + return last; +} + +#if ENABLE(REMOTE_INSPECTOR) +static void reportExceptionToInspector(JSGlobalContextRef context, JSC::JSValue exceptionValue) +{ + JSC::ExecState* exec = toJS(context); + JSC::Exception* exception = JSC::Exception::create(exec->vm(), exceptionValue); + exec->vmEntryGlobalObject()->inspectorController().reportAPIException(exec, exception); +} +#endif + +static JSContainerConvertor::Task valueToObjectWithoutCopy(JSGlobalContextRef context, JSValueRef value) +{ + if (!JSValueIsObject(context, value)) { + id primitive; + if (JSValueIsBoolean(context, value)) + primitive = JSValueToBoolean(context, value) ? @YES : @NO; + else if (JSValueIsNumber(context, value)) { + // Normalize the number, so it will unique correctly in the hash map - + // it's nicer not to leak this internal implementation detail! + value = JSValueMakeNumber(context, JSValueToNumber(context, value, 0)); + primitive = [NSNumber numberWithDouble:JSValueToNumber(context, value, 0)]; + } else if (JSValueIsString(context, value)) { + // Would be nice to unique strings, too. 
+ JSStringRef jsstring = JSValueToStringCopy(context, value, 0); + NSString * stringNS = (NSString *)JSStringCopyCFString(kCFAllocatorDefault, jsstring); + JSStringRelease(jsstring); + primitive = [stringNS autorelease]; + } else if (JSValueIsNull(context, value)) + primitive = [NSNull null]; + else { + ASSERT(JSValueIsUndefined(context, value)); + primitive = nil; + } + return (JSContainerConvertor::Task){ value, primitive, ContainerNone }; + } + + JSObjectRef object = JSValueToObject(context, value, 0); + + if (id wrapped = tryUnwrapObjcObject(context, object)) + return (JSContainerConvertor::Task){ object, wrapped, ContainerNone }; + + if (isDate(object, context)) + return (JSContainerConvertor::Task){ object, [NSDate dateWithTimeIntervalSince1970:JSValueToNumber(context, object, 0) / 1000.0], ContainerNone }; + + if (isArray(object, context)) + return (JSContainerConvertor::Task){ object, [NSMutableArray array], ContainerArray }; + + return (JSContainerConvertor::Task){ object, [NSMutableDictionary dictionary], ContainerDictionary }; +} + +static id containerValueToObject(JSGlobalContextRef context, JSContainerConvertor::Task task) +{ + ASSERT(task.type != ContainerNone); + JSC::JSLockHolder locker(toJS(context)); + JSContainerConvertor convertor(context); + convertor.add(task); + ASSERT(!convertor.isWorkListEmpty()); + + do { + JSContainerConvertor::Task current = convertor.take(); + ASSERT(JSValueIsObject(context, current.js)); + JSObjectRef js = JSValueToObject(context, current.js, 0); + + if (current.type == ContainerArray) { + ASSERT([current.objc isKindOfClass:[NSMutableArray class]]); + NSMutableArray *array = (NSMutableArray *)current.objc; + + JSStringRef lengthString = JSStringCreateWithUTF8CString("length"); + unsigned length = JSC::toUInt32(JSValueToNumber(context, JSObjectGetProperty(context, js, lengthString, 0), 0)); + JSStringRelease(lengthString); + + for (unsigned i = 0; i < length; ++i) { + id objc = convertor.convert(JSObjectGetPropertyAtIndex(context, js, i, 0)); + [array addObject:objc ? objc : [NSNull null]]; + } + } else { + ASSERT([current.objc isKindOfClass:[NSMutableDictionary class]]); + NSMutableDictionary *dictionary = (NSMutableDictionary *)current.objc; + + JSC::JSLockHolder locker(toJS(context)); + + JSPropertyNameArrayRef propertyNameArray = JSObjectCopyPropertyNames(context, js); + size_t length = JSPropertyNameArrayGetCount(propertyNameArray); + + for (size_t i = 0; i < length; ++i) { + JSStringRef propertyName = JSPropertyNameArrayGetNameAtIndex(propertyNameArray, i); + if (id objc = convertor.convert(JSObjectGetProperty(context, js, propertyName, 0))) + dictionary[[(NSString *)JSStringCopyCFString(kCFAllocatorDefault, propertyName) autorelease]] = objc; + } + + JSPropertyNameArrayRelease(propertyNameArray); + } + + } while (!convertor.isWorkListEmpty()); + + return task.objc; +} + +id valueToObject(JSContext *context, JSValueRef value) +{ + JSContainerConvertor::Task result = valueToObjectWithoutCopy([context JSGlobalContextRef], value); + if (result.type == ContainerNone) + return result.objc; + return containerValueToObject([context JSGlobalContextRef], result); +} + +id valueToNumber(JSGlobalContextRef context, JSValueRef value, JSValueRef* exception) +{ + ASSERT(!*exception); + if (id wrapped = tryUnwrapObjcObject(context, value)) { + if ([wrapped isKindOfClass:[NSNumber class]]) + return wrapped; + } + + if (JSValueIsBoolean(context, value)) + return JSValueToBoolean(context, value) ? 
@YES : @NO; + + double result = JSValueToNumber(context, value, exception); + return [NSNumber numberWithDouble:*exception ? std::numeric_limits::quiet_NaN() : result]; +} + +id valueToString(JSGlobalContextRef context, JSValueRef value, JSValueRef* exception) +{ + ASSERT(!*exception); + if (id wrapped = tryUnwrapObjcObject(context, value)) { + if ([wrapped isKindOfClass:[NSString class]]) + return wrapped; + } + + JSStringRef jsstring = JSValueToStringCopy(context, value, exception); + if (*exception) { + ASSERT(!jsstring); + return nil; + } + + RetainPtr stringCF = adoptCF(JSStringCopyCFString(kCFAllocatorDefault, jsstring)); + JSStringRelease(jsstring); + return (NSString *)stringCF.autorelease(); +} + +id valueToDate(JSGlobalContextRef context, JSValueRef value, JSValueRef* exception) +{ + ASSERT(!*exception); + if (id wrapped = tryUnwrapObjcObject(context, value)) { + if ([wrapped isKindOfClass:[NSDate class]]) + return wrapped; + } + + double result = JSValueToNumber(context, value, exception) / 1000.0; + return *exception ? nil : [NSDate dateWithTimeIntervalSince1970:result]; +} + +id valueToArray(JSGlobalContextRef context, JSValueRef value, JSValueRef* exception) +{ + ASSERT(!*exception); + if (id wrapped = tryUnwrapObjcObject(context, value)) { + if ([wrapped isKindOfClass:[NSArray class]]) + return wrapped; + } + + if (JSValueIsObject(context, value)) + return containerValueToObject(context, (JSContainerConvertor::Task){ value, [NSMutableArray array], ContainerArray}); + + JSC::JSLockHolder locker(toJS(context)); + if (!(JSValueIsNull(context, value) || JSValueIsUndefined(context, value))) { + JSC::JSObject* exceptionObject = JSC::createTypeError(toJS(context), ASCIILiteral("Cannot convert primitive to NSArray")); + *exception = toRef(exceptionObject); +#if ENABLE(REMOTE_INSPECTOR) + reportExceptionToInspector(context, exceptionObject); +#endif + } + return nil; +} + +id valueToDictionary(JSGlobalContextRef context, JSValueRef value, JSValueRef* exception) +{ + ASSERT(!*exception); + if (id wrapped = tryUnwrapObjcObject(context, value)) { + if ([wrapped isKindOfClass:[NSDictionary class]]) + return wrapped; + } + + if (JSValueIsObject(context, value)) + return containerValueToObject(context, (JSContainerConvertor::Task){ value, [NSMutableDictionary dictionary], ContainerDictionary}); + + JSC::JSLockHolder locker(toJS(context)); + if (!(JSValueIsNull(context, value) || JSValueIsUndefined(context, value))) { + JSC::JSObject* exceptionObject = JSC::createTypeError(toJS(context), ASCIILiteral("Cannot convert primitive to NSDictionary")); + *exception = toRef(exceptionObject); +#if ENABLE(REMOTE_INSPECTOR) + reportExceptionToInspector(context, exceptionObject); +#endif + } + return nil; +} + +class ObjcContainerConvertor { +public: + struct Task { + id objc; + JSValueRef js; + ConversionType type; + }; + + ObjcContainerConvertor(JSContext *context) + : m_context(context) + { + } + + JSValueRef convert(id object); + void add(Task); + Task take(); + bool isWorkListEmpty() const { return !m_worklist.size(); } + +private: + JSContext *m_context; + HashMap m_objectMap; + Vector m_worklist; + Vector> m_jsValues; +}; + +JSValueRef ObjcContainerConvertor::convert(id object) +{ + ASSERT(object); + + auto it = m_objectMap.find(object); + if (it != m_objectMap.end()) + return it->value; + + ObjcContainerConvertor::Task task = objectToValueWithoutCopy(m_context, object); + add(task); + return task.js; +} + +void ObjcContainerConvertor::add(ObjcContainerConvertor::Task task) +{ + JSC::ExecState* 
exec = toJS(m_context.JSGlobalContextRef); + m_jsValues.append(JSC::Strong(exec->vm(), toJSForGC(exec, task.js))); + m_objectMap.add(task.objc, task.js); + if (task.type != ContainerNone) + m_worklist.append(task); +} + +ObjcContainerConvertor::Task ObjcContainerConvertor::take() +{ + ASSERT(!isWorkListEmpty()); + Task last = m_worklist.last(); + m_worklist.removeLast(); + return last; +} + +inline bool isNSBoolean(id object) +{ + ASSERT([@YES class] == [@NO class]); + ASSERT([@YES class] != [NSNumber class]); + ASSERT([[@YES class] isSubclassOfClass:[NSNumber class]]); + return [object isKindOfClass:[@YES class]]; +} + +static ObjcContainerConvertor::Task objectToValueWithoutCopy(JSContext *context, id object) +{ + JSGlobalContextRef contextRef = [context JSGlobalContextRef]; + + if (!object) + return (ObjcContainerConvertor::Task){ object, JSValueMakeUndefined(contextRef), ContainerNone }; + + if (!class_conformsToProtocol(object_getClass(object), getJSExportProtocol())) { + if ([object isKindOfClass:[NSArray class]]) + return (ObjcContainerConvertor::Task){ object, JSObjectMakeArray(contextRef, 0, NULL, 0), ContainerArray }; + + if ([object isKindOfClass:[NSDictionary class]]) + return (ObjcContainerConvertor::Task){ object, JSObjectMake(contextRef, 0, 0), ContainerDictionary }; + + if ([object isKindOfClass:[NSNull class]]) + return (ObjcContainerConvertor::Task){ object, JSValueMakeNull(contextRef), ContainerNone }; + + if ([object isKindOfClass:[JSValue class]]) + return (ObjcContainerConvertor::Task){ object, ((JSValue *)object)->m_value, ContainerNone }; + + if ([object isKindOfClass:[NSString class]]) { + JSStringRef string = JSStringCreateWithCFString((CFStringRef)object); + JSValueRef js = JSValueMakeString(contextRef, string); + JSStringRelease(string); + return (ObjcContainerConvertor::Task){ object, js, ContainerNone }; + } + + if ([object isKindOfClass:[NSNumber class]]) { + if (isNSBoolean(object)) + return (ObjcContainerConvertor::Task){ object, JSValueMakeBoolean(contextRef, [object boolValue]), ContainerNone }; + return (ObjcContainerConvertor::Task){ object, JSValueMakeNumber(contextRef, [object doubleValue]), ContainerNone }; + } + + if ([object isKindOfClass:[NSDate class]]) { + JSValueRef argument = JSValueMakeNumber(contextRef, [object timeIntervalSince1970] * 1000.0); + JSObjectRef result = JSObjectMakeDate(contextRef, 1, &argument, 0); + return (ObjcContainerConvertor::Task){ object, result, ContainerNone }; + } + + if ([object isKindOfClass:[JSManagedValue class]]) { + JSValue *value = [static_cast(object) value]; + if (!value) + return (ObjcContainerConvertor::Task) { object, JSValueMakeUndefined(contextRef), ContainerNone }; + return (ObjcContainerConvertor::Task){ object, value->m_value, ContainerNone }; + } + } + + return (ObjcContainerConvertor::Task){ object, valueInternalValue([context wrapperForObjCObject:object]), ContainerNone }; +} + +JSValueRef objectToValue(JSContext *context, id object) +{ + JSGlobalContextRef contextRef = [context JSGlobalContextRef]; + + ObjcContainerConvertor::Task task = objectToValueWithoutCopy(context, object); + if (task.type == ContainerNone) + return task.js; + + JSC::JSLockHolder locker(toJS(contextRef)); + ObjcContainerConvertor convertor(context); + convertor.add(task); + ASSERT(!convertor.isWorkListEmpty()); + + do { + ObjcContainerConvertor::Task current = convertor.take(); + ASSERT(JSValueIsObject(contextRef, current.js)); + JSObjectRef js = JSValueToObject(contextRef, current.js, 0); + + if (current.type == 
ContainerArray) { + ASSERT([current.objc isKindOfClass:[NSArray class]]); + NSArray *array = (NSArray *)current.objc; + NSUInteger count = [array count]; + for (NSUInteger index = 0; index < count; ++index) + JSObjectSetPropertyAtIndex(contextRef, js, index, convertor.convert([array objectAtIndex:index]), 0); + } else { + ASSERT(current.type == ContainerDictionary); + ASSERT([current.objc isKindOfClass:[NSDictionary class]]); + NSDictionary *dictionary = (NSDictionary *)current.objc; + for (id key in [dictionary keyEnumerator]) { + if ([key isKindOfClass:[NSString class]]) { + JSStringRef propertyName = JSStringCreateWithCFString((CFStringRef)key); + JSObjectSetProperty(contextRef, js, propertyName, convertor.convert([dictionary objectForKey:key]), 0, 0); + JSStringRelease(propertyName); + } + } + } + + } while (!convertor.isWorkListEmpty()); + + return task.js; +} + +JSValueRef valueInternalValue(JSValue * value) +{ + return value->m_value; +} + ++ (JSValue *)valueWithJSValueRef:(JSValueRef)value inContext:(JSContext *)context +{ + return [context wrapperForJSObject:value]; +} + +- (JSValue *)init +{ + return nil; +} + +- (JSValue *)initWithValue:(JSValueRef)value inContext:(JSContext *)context +{ + if (!value || !context) + return nil; + + self = [super init]; + if (!self) + return nil; + + _context = [context retain]; + m_value = value; + JSValueProtect([_context JSGlobalContextRef], m_value); + return self; +} + +struct StructTagHandler { + SEL typeToValueSEL; + SEL valueToTypeSEL; +}; +typedef HashMap StructHandlers; + +static StructHandlers* createStructHandlerMap() +{ + StructHandlers* structHandlers = new StructHandlers(); + + size_t valueWithXinContextLength = strlen("valueWithX:inContext:"); + size_t toXLength = strlen("toX"); + + // Step 1: find all valueWith:inContext: class methods in JSValue. + forEachMethodInClass(object_getClass([JSValue class]), ^(Method method){ + SEL selector = method_getName(method); + const char* name = sel_getName(selector); + size_t nameLength = strlen(name); + // Check for valueWith:context: + if (nameLength < valueWithXinContextLength || memcmp(name, "valueWith", 9) || memcmp(name + nameLength - 11, ":inContext:", 11)) + return; + // Check for [ id, SEL, , ] + if (method_getNumberOfArguments(method) != 4) + return; + char idType[3]; + // Check 2nd argument type is "@" + char* secondType = method_copyArgumentType(method, 3); + if (strcmp(secondType, "@") != 0) { + free(secondType); + return; + } + free(secondType); + // Check result type is also "@" + method_getReturnType(method, idType, 3); + if (strcmp(idType, "@") != 0) + return; + char* type = method_copyArgumentType(method, 2); + structHandlers->add(StringImpl::create(type), (StructTagHandler){ selector, 0 }); + free(type); + }); + + // Step 2: find all to instance methods in JSValue. + forEachMethodInClass([JSValue class], ^(Method method){ + SEL selector = method_getName(method); + const char* name = sel_getName(selector); + size_t nameLength = strlen(name); + // Check for to + if (nameLength < toXLength || memcmp(name, "to", 2)) + return; + // Check for [ id, SEL ] + if (method_getNumberOfArguments(method) != 2) + return; + // Try to find a matching valueWith:context: method. 
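+ // The return type encoding of this to<Type> method is looked up among the struct tags
+ // recorded in Step 1. The candidate valueWith<Type>:inContext: selector is accepted only
+ // if its <Type> portion matches this method's <Type> portion in length and content, and
+ // only then is the to<Type> selector recorded alongside it.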
+ char* type = method_copyReturnType(method); + + StructHandlers::iterator iter = structHandlers->find(type); + free(type); + if (iter == structHandlers->end()) + return; + StructTagHandler& handler = iter->value; + + // check that strlen() == strlen() + const char* valueWithName = sel_getName(handler.typeToValueSEL); + size_t valueWithLength = strlen(valueWithName); + if (valueWithLength - valueWithXinContextLength != nameLength - toXLength) + return; + // Check that == + if (memcmp(valueWithName + 9, name + 2, nameLength - toXLength - 1)) + return; + handler.valueToTypeSEL = selector; + }); + + // Step 3: clean up - remove entries where we found prospective valueWith:inContext: conversions, but no matching to methods. + typedef HashSet RemoveSet; + RemoveSet removeSet; + for (StructHandlers::iterator iter = structHandlers->begin(); iter != structHandlers->end(); ++iter) { + StructTagHandler& handler = iter->value; + if (!handler.valueToTypeSEL) + removeSet.add(iter->key); + } + + for (RemoveSet::iterator iter = removeSet.begin(); iter != removeSet.end(); ++iter) + structHandlers->remove(*iter); + + return structHandlers; +} + +static StructTagHandler* handerForStructTag(const char* encodedType) +{ + static StaticLock handerForStructTagLock; + LockHolder lockHolder(&handerForStructTagLock); + + static StructHandlers* structHandlers = createStructHandlerMap(); + + StructHandlers::iterator iter = structHandlers->find(encodedType); + if (iter == structHandlers->end()) + return 0; + return &iter->value; +} + ++ (SEL)selectorForStructToValue:(const char *)structTag +{ + StructTagHandler* handler = handerForStructTag(structTag); + return handler ? handler->typeToValueSEL : nil; +} + ++ (SEL)selectorForValueToStruct:(const char *)structTag +{ + StructTagHandler* handler = handerForStructTag(structTag); + return handler ? handler->valueToTypeSEL : nil; +} + +- (void)dealloc +{ + JSValueUnprotect([_context JSGlobalContextRef], m_value); + [_context release]; + _context = nil; + [super dealloc]; +} + +- (NSString *)description +{ + if (id wrapped = tryUnwrapObjcObject([_context JSGlobalContextRef], m_value)) + return [wrapped description]; + return [self toString]; +} + +NSInvocation *typeToValueInvocationFor(const char* encodedType) +{ + SEL selector = [JSValue selectorForStructToValue:encodedType]; + if (!selector) + return 0; + + const char* methodTypes = method_getTypeEncoding(class_getClassMethod([JSValue class], selector)); + NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:[NSMethodSignature signatureWithObjCTypes:methodTypes]]; + [invocation setSelector:selector]; + return invocation; +} + +NSInvocation *valueToTypeInvocationFor(const char* encodedType) +{ + SEL selector = [JSValue selectorForValueToStruct:encodedType]; + if (!selector) + return 0; + + const char* methodTypes = method_getTypeEncoding(class_getInstanceMethod([JSValue class], selector)); + NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:[NSMethodSignature signatureWithObjCTypes:methodTypes]]; + [invocation setSelector:selector]; + return invocation; +} + +@end + +#endif diff --git a/API/JSValueInternal.h b/API/JSValueInternal.h new file mode 100644 index 0000000..4f1a8f6 --- /dev/null +++ b/API/JSValueInternal.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSValueInternal_h +#define JSValueInternal_h + +#import +#import + +#if JSC_OBJC_API_ENABLED + +@interface JSValue(Internal) + +JSValueRef valueInternalValue(JSValue *); + +- (JSValue *)initWithValue:(JSValueRef)value inContext:(JSContext *)context; + +JSValueRef objectToValue(JSContext *, id); +id valueToObject(JSContext *, JSValueRef); +id valueToNumber(JSGlobalContextRef, JSValueRef, JSValueRef* exception); +id valueToString(JSGlobalContextRef, JSValueRef, JSValueRef* exception); +id valueToDate(JSGlobalContextRef, JSValueRef, JSValueRef* exception); +id valueToArray(JSGlobalContextRef, JSValueRef, JSValueRef* exception); +id valueToDictionary(JSGlobalContextRef, JSValueRef, JSValueRef* exception); + ++ (SEL)selectorForStructToValue:(const char *)structTag; ++ (SEL)selectorForValueToStruct:(const char *)structTag; + +@end + +NSInvocation *typeToValueInvocationFor(const char* encodedType); +NSInvocation *valueToTypeInvocationFor(const char* encodedType); + +#endif + +#endif // JSValueInternal_h diff --git a/API/JSValueRef.cpp b/API/JSValueRef.cpp new file mode 100644 index 0000000..c27b7c3 --- /dev/null +++ b/API/JSValueRef.cpp @@ -0,0 +1,452 @@ +/* + * Copyright (C) 2006, 2007, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JSValueRef.h" + +#include "APICast.h" +#include "APIUtils.h" +#include "DateInstance.h" +#include "Exception.h" +#include "JSAPIWrapperObject.h" +#include "JSCInlines.h" +#include "JSCJSValue.h" +#include "JSCallbackObject.h" +#include "JSGlobalObject.h" +#include "JSONObject.h" +#include "JSString.h" +#include "LiteralParser.h" +#include "Protect.h" +#include +#include +#include +#include + +#if PLATFORM(MAC) +#include +#endif + +#if ENABLE(REMOTE_INSPECTOR) +#include "JSGlobalObjectInspectorController.h" +#endif + +using namespace JSC; + +#if PLATFORM(MAC) +static bool evernoteHackNeeded() +{ + static const int32_t webkitLastVersionWithEvernoteHack = 35133959; + static bool hackNeeded = CFEqual(CFBundleGetIdentifier(CFBundleGetMainBundle()), CFSTR("com.evernote.Evernote")) + && NSVersionOfLinkTimeLibrary("JavaScriptCore") <= webkitLastVersionWithEvernoteHack; + + return hackNeeded; +} +#endif + +::JSType JSValueGetType(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return kJSTypeUndefined; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + + if (jsValue.isUndefined()) + return kJSTypeUndefined; + if (jsValue.isNull()) + return kJSTypeNull; + if (jsValue.isBoolean()) + return kJSTypeBoolean; + if (jsValue.isNumber()) + return kJSTypeNumber; + if (jsValue.isString()) + return kJSTypeString; + ASSERT(jsValue.isObject()); + return kJSTypeObject; +} + +bool JSValueIsUndefined(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).isUndefined(); +} + +bool JSValueIsNull(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).isNull(); +} + +bool JSValueIsBoolean(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).isBoolean(); +} + +bool JSValueIsNumber(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).isNumber(); +} + +bool JSValueIsString(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).isString(); +} + +bool JSValueIsObject(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).isObject(); +} + +bool JSValueIsArray(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return 
toJS(exec, value).inherits(JSArray::info()); +} + +bool JSValueIsDate(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toJS(exec, value).inherits(DateInstance::info()); +} + +bool JSValueIsObjectOfClass(JSContextRef ctx, JSValueRef value, JSClassRef jsClass) +{ + if (!ctx || !jsClass) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + + if (JSObject* o = jsValue.getObject()) { + if (o->inherits(JSProxy::info())) + o = jsCast(o)->target(); + + if (o->inherits(JSCallbackObject::info())) + return jsCast*>(o)->inherits(jsClass); + if (o->inherits(JSCallbackObject::info())) + return jsCast*>(o)->inherits(jsClass); +#if JSC_OBJC_API_ENABLED + if (o->inherits(JSCallbackObject::info())) + return jsCast*>(o)->inherits(jsClass); +#endif + } + return false; +} + +bool JSValueIsEqual(JSContextRef ctx, JSValueRef a, JSValueRef b, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsA = toJS(exec, a); + JSValue jsB = toJS(exec, b); + + bool result = JSValue::equal(exec, jsA, jsB); // false if an exception is thrown + handleExceptionIfNeeded(exec, exception); + + return result; +} + +bool JSValueIsStrictEqual(JSContextRef ctx, JSValueRef a, JSValueRef b) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsA = toJS(exec, a); + JSValue jsB = toJS(exec, b); + + return JSValue::strictEqual(exec, jsA, jsB); +} + +bool JSValueIsInstanceOfConstructor(JSContextRef ctx, JSValueRef value, JSObjectRef constructor, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + + JSObject* jsConstructor = toJS(constructor); + if (!jsConstructor->structure()->typeInfo().implementsHasInstance()) + return false; + bool result = jsConstructor->hasInstance(exec, jsValue); // false if an exception is thrown + handleExceptionIfNeeded(exec, exception); + return result; +} + +JSValueRef JSValueMakeUndefined(JSContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toRef(exec, jsUndefined()); +} + +JSValueRef JSValueMakeNull(JSContextRef ctx) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toRef(exec, jsNull()); +} + +JSValueRef JSValueMakeBoolean(JSContextRef ctx, bool value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toRef(exec, jsBoolean(value)); +} + +JSValueRef JSValueMakeNumber(JSContextRef ctx, double value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toRef(exec, jsNumber(purifyNaN(value))); +} + +JSValueRef JSValueMakeString(JSContextRef ctx, JSStringRef string) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + return toRef(exec, jsString(exec, string ? 
string->string() : String())); +} + +JSValueRef JSValueMakeFromJSONString(JSContextRef ctx, JSStringRef string) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + String str = string->string(); + unsigned length = str.length(); + if (!length || str.is8Bit()) { + LiteralParser parser(exec, str.characters8(), length, StrictJSON); + return toRef(exec, parser.tryLiteralParse()); + } + LiteralParser parser(exec, str.characters16(), length, StrictJSON); + return toRef(exec, parser.tryLiteralParse()); +} + +JSStringRef JSValueCreateJSONString(JSContextRef ctx, JSValueRef apiValue, unsigned indent, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSValue value = toJS(exec, apiValue); + String result = JSONStringify(exec, value, indent); + if (exception) + *exception = 0; + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + return 0; + return OpaqueJSString::create(result).leakRef(); +} + +bool JSValueToBoolean(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return false; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + return jsValue.toBoolean(exec); +} + +double JSValueToNumber(JSContextRef ctx, JSValueRef value, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return PNaN; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + + double number = jsValue.toNumber(exec); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + number = PNaN; + return number; +} + +JSStringRef JSValueToStringCopy(JSContextRef ctx, JSValueRef value, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + + auto stringRef(OpaqueJSString::create(jsValue.toWTFString(exec))); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + stringRef = nullptr; + return stringRef.leakRef(); +} + +JSObjectRef JSValueToObject(JSContextRef ctx, JSValueRef value, JSValueRef* exception) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJS(exec, value); + + JSObjectRef objectRef = toRef(jsValue.toObject(exec)); + if (handleExceptionIfNeeded(exec, exception) == ExceptionStatus::DidThrow) + objectRef = 0; + return objectRef; +} + +void JSValueProtect(JSContextRef ctx, JSValueRef value) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJSForGC(exec, value); + gcProtect(jsValue); +} + +void JSValueUnprotect(JSContextRef ctx, JSValueRef value) +{ +#if PLATFORM(MAC) + if ((!value || !ctx) && evernoteHackNeeded()) + return; +#endif + + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + + JSValue jsValue = toJSForGC(exec, value); + gcUnprotect(jsValue); +} diff --git a/API/JSValueRef.h b/API/JSValueRef.h new file mode 100644 index 0000000..9815de7 --- /dev/null +++ b/API/JSValueRef.h @@ -0,0 +1,359 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSValueRef_h +#define JSValueRef_h + +#include +#include + +#ifndef __cplusplus +#include +#endif + +/*! +@enum JSType +@abstract A constant identifying the type of a JSValue. +@constant kJSTypeUndefined The unique undefined value. +@constant kJSTypeNull The unique null value. +@constant kJSTypeBoolean A primitive boolean value, one of true or false. +@constant kJSTypeNumber A primitive number value. +@constant kJSTypeString A primitive string value. +@constant kJSTypeObject An object value (meaning that this JSValueRef is a JSObjectRef). +*/ +typedef enum { + kJSTypeUndefined, + kJSTypeNull, + kJSTypeBoolean, + kJSTypeNumber, + kJSTypeString, + kJSTypeObject +} JSType; + +/*! + @enum JSTypedArrayType + @abstract A constant identifying the Typed Array type of a JSObjectRef. + @constant kJSTypedArrayTypeInt8Array Int8Array + @constant kJSTypedArrayTypeInt16Array Int16Array + @constant kJSTypedArrayTypeInt32Array Int32Array + @constant kJSTypedArrayTypeUint8Array Uint8Array + @constant kJSTypedArrayTypeUint8ClampedArray Uint8ClampedArray + @constant kJSTypedArrayTypeUint16Array Uint16Array + @constant kJSTypedArrayTypeUint32Array Uint32Array + @constant kJSTypedArrayTypeFloat32Array Float32Array + @constant kJSTypedArrayTypeFloat64Array Float64Array + @constant kJSTypedArrayTypeArrayBuffer ArrayBuffer + @constant kJSTypedArrayTypeNone Not a Typed Array + + */ +typedef enum { + kJSTypedArrayTypeInt8Array, + kJSTypedArrayTypeInt16Array, + kJSTypedArrayTypeInt32Array, + kJSTypedArrayTypeUint8Array, + kJSTypedArrayTypeUint8ClampedArray, + kJSTypedArrayTypeUint16Array, + kJSTypedArrayTypeUint32Array, + kJSTypedArrayTypeFloat32Array, + kJSTypedArrayTypeFloat64Array, + kJSTypedArrayTypeArrayBuffer, + kJSTypedArrayTypeNone, +} JSTypedArrayType CF_ENUM_AVAILABLE(10_12, 10_0); + +#ifdef __cplusplus +extern "C" { +#endif + +/*! +@function +@abstract Returns a JavaScript value's type. +@param ctx The execution context to use. +@param value The JSValue whose type you want to obtain. +@result A value of type JSType that identifies value's type. +*/ +JS_EXPORT JSType JSValueGetType(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value's type is the undefined type. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value's type is the undefined type, otherwise false. 
+*/ +JS_EXPORT bool JSValueIsUndefined(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value's type is the null type. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value's type is the null type, otherwise false. +*/ +JS_EXPORT bool JSValueIsNull(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value's type is the boolean type. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value's type is the boolean type, otherwise false. +*/ +JS_EXPORT bool JSValueIsBoolean(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value's type is the number type. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value's type is the number type, otherwise false. +*/ +JS_EXPORT bool JSValueIsNumber(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value's type is the string type. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value's type is the string type, otherwise false. +*/ +JS_EXPORT bool JSValueIsString(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value's type is the object type. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value's type is the object type, otherwise false. +*/ +JS_EXPORT bool JSValueIsObject(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Tests whether a JavaScript value is an object with a given class in its class chain. +@param ctx The execution context to use. +@param value The JSValue to test. +@param jsClass The JSClass to test against. +@result true if value is an object and has jsClass in its class chain, otherwise false. +*/ +JS_EXPORT bool JSValueIsObjectOfClass(JSContextRef ctx, JSValueRef value, JSClassRef jsClass); + +/*! +@function +@abstract Tests whether a JavaScript value is an array. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value is an array, otherwise false. +*/ +JS_EXPORT bool JSValueIsArray(JSContextRef ctx, JSValueRef value) CF_AVAILABLE(10_11, 9_0); + +/*! +@function +@abstract Tests whether a JavaScript value is a date. +@param ctx The execution context to use. +@param value The JSValue to test. +@result true if value is a date, otherwise false. +*/ +JS_EXPORT bool JSValueIsDate(JSContextRef ctx, JSValueRef value) CF_AVAILABLE(10_11, 9_0); + +/*! +@function +@abstract Returns a JavaScript value's Typed Array type. +@param ctx The execution context to use. +@param value The JSValue whose Typed Array type to return. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result A value of type JSTypedArrayType that identifies value's Typed Array type, or kJSTypedArrayTypeNone if the value is not a Typed Array object. + */ +JS_EXPORT JSTypedArrayType JSValueGetTypedArrayType(JSContextRef ctx, JSValueRef value, JSValueRef* exception) CF_AVAILABLE(10_12, 10_0); + +/* Comparing values */ + +/*! +@function +@abstract Tests whether two JavaScript values are equal, as compared by the JS == operator. +@param ctx The execution context to use. +@param a The first value to test. +@param b The second value to test. 
+@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result true if the two values are equal, false if they are not equal or an exception is thrown. +*/ +JS_EXPORT bool JSValueIsEqual(JSContextRef ctx, JSValueRef a, JSValueRef b, JSValueRef* exception); + +/*! +@function +@abstract Tests whether two JavaScript values are strict equal, as compared by the JS === operator. +@param ctx The execution context to use. +@param a The first value to test. +@param b The second value to test. +@result true if the two values are strict equal, otherwise false. +*/ +JS_EXPORT bool JSValueIsStrictEqual(JSContextRef ctx, JSValueRef a, JSValueRef b); + +/*! +@function +@abstract Tests whether a JavaScript value is an object constructed by a given constructor, as compared by the JS instanceof operator. +@param ctx The execution context to use. +@param value The JSValue to test. +@param constructor The constructor to test against. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result true if value is an object constructed by constructor, as compared by the JS instanceof operator, otherwise false. +*/ +JS_EXPORT bool JSValueIsInstanceOfConstructor(JSContextRef ctx, JSValueRef value, JSObjectRef constructor, JSValueRef* exception); + +/* Creating values */ + +/*! +@function +@abstract Creates a JavaScript value of the undefined type. +@param ctx The execution context to use. +@result The unique undefined value. +*/ +JS_EXPORT JSValueRef JSValueMakeUndefined(JSContextRef ctx); + +/*! +@function +@abstract Creates a JavaScript value of the null type. +@param ctx The execution context to use. +@result The unique null value. +*/ +JS_EXPORT JSValueRef JSValueMakeNull(JSContextRef ctx); + +/*! +@function +@abstract Creates a JavaScript value of the boolean type. +@param ctx The execution context to use. +@param boolean The bool to assign to the newly created JSValue. +@result A JSValue of the boolean type, representing the value of boolean. +*/ +JS_EXPORT JSValueRef JSValueMakeBoolean(JSContextRef ctx, bool boolean); + +/*! +@function +@abstract Creates a JavaScript value of the number type. +@param ctx The execution context to use. +@param number The double to assign to the newly created JSValue. +@result A JSValue of the number type, representing the value of number. +*/ +JS_EXPORT JSValueRef JSValueMakeNumber(JSContextRef ctx, double number); + +/*! +@function +@abstract Creates a JavaScript value of the string type. +@param ctx The execution context to use. +@param string The JSString to assign to the newly created JSValue. The + newly created JSValue retains string, and releases it upon garbage collection. +@result A JSValue of the string type, representing the value of string. +*/ +JS_EXPORT JSValueRef JSValueMakeString(JSContextRef ctx, JSStringRef string); + +/* Converting to and from JSON formatted strings */ + +/*! + @function + @abstract Creates a JavaScript value from a JSON formatted string. + @param ctx The execution context to use. + @param string The JSString containing the JSON string to be parsed. + @result A JSValue containing the parsed value, or NULL if the input is invalid. + */ +JS_EXPORT JSValueRef JSValueMakeFromJSONString(JSContextRef ctx, JSStringRef string) CF_AVAILABLE(10_7, 7_0); + +/*! + @function + @abstract Creates a JavaScript string containing the JSON serialized representation of a JS value. 
+ @param ctx The execution context to use. + @param value The value to serialize. + @param indent The number of spaces to indent when nesting. If 0, the resulting JSON will not contains newlines. The size of the indent is clamped to 10 spaces. + @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. + @result A JSString with the result of serialization, or NULL if an exception is thrown. + */ +JS_EXPORT JSStringRef JSValueCreateJSONString(JSContextRef ctx, JSValueRef value, unsigned indent, JSValueRef* exception) CF_AVAILABLE(10_7, 7_0); + +/* Converting to primitive values */ + +/*! +@function +@abstract Converts a JavaScript value to boolean and returns the resulting boolean. +@param ctx The execution context to use. +@param value The JSValue to convert. +@result The boolean result of conversion. +*/ +JS_EXPORT bool JSValueToBoolean(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Converts a JavaScript value to number and returns the resulting number. +@param ctx The execution context to use. +@param value The JSValue to convert. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result The numeric result of conversion, or NaN if an exception is thrown. +*/ +JS_EXPORT double JSValueToNumber(JSContextRef ctx, JSValueRef value, JSValueRef* exception); + +/*! +@function +@abstract Converts a JavaScript value to string and copies the result into a JavaScript string. +@param ctx The execution context to use. +@param value The JSValue to convert. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result A JSString with the result of conversion, or NULL if an exception is thrown. Ownership follows the Create Rule. +*/ +JS_EXPORT JSStringRef JSValueToStringCopy(JSContextRef ctx, JSValueRef value, JSValueRef* exception); + +/*! +@function +@abstract Converts a JavaScript value to object and returns the resulting object. +@param ctx The execution context to use. +@param value The JSValue to convert. +@param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. +@result The JSObject result of conversion, or NULL if an exception is thrown. +*/ +JS_EXPORT JSObjectRef JSValueToObject(JSContextRef ctx, JSValueRef value, JSValueRef* exception); + +/* Garbage collection */ +/*! +@function +@abstract Protects a JavaScript value from garbage collection. +@param ctx The execution context to use. +@param value The JSValue to protect. +@discussion Use this method when you want to store a JSValue in a global or on the heap, where the garbage collector will not be able to discover your reference to it. + +A value may be protected multiple times and must be unprotected an equal number of times before becoming eligible for garbage collection. +*/ +JS_EXPORT void JSValueProtect(JSContextRef ctx, JSValueRef value); + +/*! +@function +@abstract Unprotects a JavaScript value from garbage collection. +@param ctx The execution context to use. +@param value The JSValue to unprotect. +@discussion A value may be protected multiple times and must be unprotected an + equal number of times before becoming eligible for garbage collection. 
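+
+A minimal illustrative sketch of the balancing rule, using the ctx and value parameters above:
+
+    JSValueProtect(ctx, value);
+    JSValueProtect(ctx, value);   // protected twice
+    JSValueUnprotect(ctx, value);
+    JSValueUnprotect(ctx, value); // now eligible for garbage collection again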
+*/ +JS_EXPORT void JSValueUnprotect(JSContextRef ctx, JSValueRef value); + +#ifdef __cplusplus +} +#endif + +#endif /* JSValueRef_h */ diff --git a/API/JSVirtualMachine.h b/API/JSVirtualMachine.h new file mode 100644 index 0000000..ccf9264 --- /dev/null +++ b/API/JSVirtualMachine.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import + +#if JSC_OBJC_API_ENABLED + +/*! +@interface +@discussion An instance of JSVirtualMachine represents a single JavaScript "object space" + or set of execution resources. Thread safety is supported by locking the + virtual machine, with concurrent JavaScript execution supported by allocating + separate instances of JSVirtualMachine. +*/ +NS_CLASS_AVAILABLE(10_9, 7_0) +@interface JSVirtualMachine : NSObject + +/*! +@methodgroup Creating New Virtual Machines +*/ +/*! +@method +@abstract Create a new JSVirtualMachine. +*/ +- (instancetype)init; + +/*! +@methodgroup Memory Management +*/ +/*! +@method +@abstract Notify the JSVirtualMachine of an external object relationship. +@discussion Allows clients of JSVirtualMachine to make the JavaScript runtime aware of + arbitrary external Objective-C object graphs. The runtime can then use + this information to retain any JavaScript values that are referenced + from somewhere in said object graph. + + For correct behavior clients must make their external object graphs + reachable from within the JavaScript runtime. If an Objective-C object is + reachable from within the JavaScript runtime, all managed references + transitively reachable from it as recorded using + -addManagedReference:withOwner: will be scanned by the garbage collector. +@param object The object that the owner points to. +@param owner The object that owns the pointed to object. +*/ +- (void)addManagedReference:(id)object withOwner:(id)owner; + +/*! +@method +@abstract Notify the JSVirtualMachine that a previous object relationship no longer exists. +@discussion The JavaScript runtime will continue to scan any references that were + reported to it by -addManagedReference:withOwner: until those references are removed. +@param object The object that was formerly owned. +@param owner The former owner. 
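+
+An illustrative sketch of a balanced pair of calls (vm, owner, and object are placeholder names):
+
+    [vm addManagedReference:object withOwner:owner];
+    // ... object stays reachable from JavaScript while owned ...
+    [vm removeManagedReference:object withOwner:owner];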
+*/ +- (void)removeManagedReference:(id)object withOwner:(id)owner; + +@end + +#endif diff --git a/API/JSVirtualMachine.mm b/API/JSVirtualMachine.mm new file mode 100644 index 0000000..2bbc043 --- /dev/null +++ b/API/JSVirtualMachine.mm @@ -0,0 +1,339 @@ +/* + * Copyright (C) 2013-2017 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#import "JavaScriptCore.h" + +#if JSC_OBJC_API_ENABLED + +#import "APICast.h" +#import "JSManagedValueInternal.h" +#import "JSVirtualMachine.h" +#import "JSVirtualMachineInternal.h" +#import "JSWrapperMap.h" +#import "SigillCrashAnalyzer.h" +#import "SlotVisitorInlines.h" +#import +#import +#import + +static NSMapTable *globalWrapperCache = 0; + +static StaticLock wrapperCacheMutex; + +static void initWrapperCache() +{ + ASSERT(!globalWrapperCache); + NSPointerFunctionsOptions keyOptions = NSPointerFunctionsOpaqueMemory | NSPointerFunctionsOpaquePersonality; + NSPointerFunctionsOptions valueOptions = NSPointerFunctionsWeakMemory | NSPointerFunctionsObjectPersonality; + globalWrapperCache = [[NSMapTable alloc] initWithKeyOptions:keyOptions valueOptions:valueOptions capacity:0]; +} + +static NSMapTable *wrapperCache() +{ + if (!globalWrapperCache) + initWrapperCache(); + return globalWrapperCache; +} + +@interface JSVMWrapperCache : NSObject ++ (void)addWrapper:(JSVirtualMachine *)wrapper forJSContextGroupRef:(JSContextGroupRef)group; ++ (JSVirtualMachine *)wrapperForJSContextGroupRef:(JSContextGroupRef)group; +@end + +@implementation JSVMWrapperCache + ++ (void)addWrapper:(JSVirtualMachine *)wrapper forJSContextGroupRef:(JSContextGroupRef)group +{ + std::lock_guard lock(wrapperCacheMutex); + NSMapInsert(wrapperCache(), group, wrapper); +} + ++ (JSVirtualMachine *)wrapperForJSContextGroupRef:(JSContextGroupRef)group +{ + std::lock_guard lock(wrapperCacheMutex); + return static_cast(NSMapGet(wrapperCache(), group)); +} + +@end + +@implementation JSVirtualMachine { + JSContextGroupRef m_group; + Lock m_externalDataMutex; + NSMapTable *m_contextCache; + NSMapTable *m_externalObjectGraph; + NSMapTable *m_externalRememberedSet; +} + +- (instancetype)init +{ + JSContextGroupRef group = JSContextGroupCreate(); + self = [self initWithContextGroupRef:group]; + // The extra JSContextGroupRetain is 
balanced here. + JSContextGroupRelease(group); + return self; +} + +- (instancetype)initWithContextGroupRef:(JSContextGroupRef)group +{ + self = [super init]; + if (!self) + return nil; + + m_group = JSContextGroupRetain(group); + + NSPointerFunctionsOptions keyOptions = NSPointerFunctionsOpaqueMemory | NSPointerFunctionsOpaquePersonality; + NSPointerFunctionsOptions valueOptions = NSPointerFunctionsWeakMemory | NSPointerFunctionsObjectPersonality; + m_contextCache = [[NSMapTable alloc] initWithKeyOptions:keyOptions valueOptions:valueOptions capacity:0]; + + NSPointerFunctionsOptions weakIDOptions = NSPointerFunctionsWeakMemory | NSPointerFunctionsObjectPersonality; + NSPointerFunctionsOptions strongIDOptions = NSPointerFunctionsStrongMemory | NSPointerFunctionsObjectPersonality; + m_externalObjectGraph = [[NSMapTable alloc] initWithKeyOptions:weakIDOptions valueOptions:strongIDOptions capacity:0]; + + NSPointerFunctionsOptions integerOptions = NSPointerFunctionsOpaqueMemory | NSPointerFunctionsIntegerPersonality; + m_externalRememberedSet = [[NSMapTable alloc] initWithKeyOptions:weakIDOptions valueOptions:integerOptions capacity:0]; + + [JSVMWrapperCache addWrapper:self forJSContextGroupRef:group]; + + return self; +} + +- (void)dealloc +{ + JSContextGroupRelease(m_group); + [m_contextCache release]; + [m_externalObjectGraph release]; + [m_externalRememberedSet release]; + [super dealloc]; +} + +static id getInternalObjcObject(id object) +{ + if ([object isKindOfClass:[JSManagedValue class]]) { + JSValue* value = [static_cast(object) value]; + if (!value) + return nil; + id temp = tryUnwrapObjcObject([value.context JSGlobalContextRef], [value JSValueRef]); + if (temp) + return temp; + return object; + } + + if ([object isKindOfClass:[JSValue class]]) { + JSValue *value = static_cast(object); + object = tryUnwrapObjcObject([value.context JSGlobalContextRef], [value JSValueRef]); + } + + return object; +} + +- (bool)isOldExternalObject:(id)object +{ + JSC::VM* vm = toJS(m_group); + return vm->heap.collectorSlotVisitor().containsOpaqueRoot(object); +} + +- (void)addExternalRememberedObject:(id)object +{ + auto locker = holdLock(m_externalDataMutex); + ASSERT([self isOldExternalObject:object]); + [m_externalRememberedSet setObject:@YES forKey:object]; +} + +- (void)addManagedReference:(id)object withOwner:(id)owner +{ + if ([object isKindOfClass:[JSManagedValue class]]) + [object didAddOwner:owner]; + + object = getInternalObjcObject(object); + owner = getInternalObjcObject(owner); + + if (!object || !owner) + return; + + JSC::JSLockHolder locker(toJS(m_group)); + if ([self isOldExternalObject:owner] && ![self isOldExternalObject:object]) + [self addExternalRememberedObject:owner]; + + auto externalDataMutexLocker = holdLock(m_externalDataMutex); + NSMapTable *ownedObjects = [m_externalObjectGraph objectForKey:owner]; + if (!ownedObjects) { + NSPointerFunctionsOptions weakIDOptions = NSPointerFunctionsWeakMemory | NSPointerFunctionsObjectPersonality; + NSPointerFunctionsOptions integerOptions = NSPointerFunctionsOpaqueMemory | NSPointerFunctionsIntegerPersonality; + ownedObjects = [[NSMapTable alloc] initWithKeyOptions:weakIDOptions valueOptions:integerOptions capacity:1]; + + [m_externalObjectGraph setObject:ownedObjects forKey:owner]; + [ownedObjects release]; + } + + size_t count = reinterpret_cast(NSMapGet(ownedObjects, object)); + NSMapInsert(ownedObjects, object, reinterpret_cast(count + 1)); +} + +- (void)removeManagedReference:(id)object withOwner:(id)owner +{ + if ([object 
isKindOfClass:[JSManagedValue class]]) + [object didRemoveOwner:owner]; + + object = getInternalObjcObject(object); + owner = getInternalObjcObject(owner); + + if (!object || !owner) + return; + + JSC::JSLockHolder locker(toJS(m_group)); + + auto externalDataMutexLocker = holdLock(m_externalDataMutex); + NSMapTable *ownedObjects = [m_externalObjectGraph objectForKey:owner]; + if (!ownedObjects) + return; + + size_t count = reinterpret_cast(NSMapGet(ownedObjects, object)); + if (count > 1) { + NSMapInsert(ownedObjects, object, reinterpret_cast(count - 1)); + return; + } + + if (count == 1) + NSMapRemove(ownedObjects, object); + + if (![ownedObjects count]) { + [m_externalObjectGraph removeObjectForKey:owner]; + [m_externalRememberedSet removeObjectForKey:owner]; + } +} + +- (void)enableSigillCrashAnalyzer +{ + JSC::enableSigillCrashAnalyzer(); +} + +@end + +@implementation JSVirtualMachine(Internal) + +JSContextGroupRef getGroupFromVirtualMachine(JSVirtualMachine *virtualMachine) +{ + return virtualMachine->m_group; +} + ++ (JSVirtualMachine *)virtualMachineWithContextGroupRef:(JSContextGroupRef)group +{ + JSVirtualMachine *virtualMachine = [JSVMWrapperCache wrapperForJSContextGroupRef:group]; + if (!virtualMachine) + virtualMachine = [[[JSVirtualMachine alloc] initWithContextGroupRef:group] autorelease]; + return virtualMachine; +} + +- (JSContext *)contextForGlobalContextRef:(JSGlobalContextRef)globalContext +{ + return static_cast(NSMapGet(m_contextCache, globalContext)); +} + +- (void)addContext:(JSContext *)wrapper forGlobalContextRef:(JSGlobalContextRef)globalContext +{ + NSMapInsert(m_contextCache, globalContext, wrapper); +} + +- (Lock&)externalDataMutex +{ + return m_externalDataMutex; +} + +- (NSMapTable *)externalObjectGraph +{ + return m_externalObjectGraph; +} + +- (NSMapTable *)externalRememberedSet +{ + return m_externalRememberedSet; +} + +@end + +static void scanExternalObjectGraph(JSC::VM& vm, JSC::SlotVisitor& visitor, void* root, bool lockAcquired) +{ + @autoreleasepool { + JSVirtualMachine *virtualMachine = [JSVMWrapperCache wrapperForJSContextGroupRef:toRef(&vm)]; + if (!virtualMachine) + return; + NSMapTable *externalObjectGraph = [virtualMachine externalObjectGraph]; + Lock& externalDataMutex = [virtualMachine externalDataMutex]; + Vector stack; + stack.append(root); + while (!stack.isEmpty()) { + void* nextRoot = stack.last(); + stack.removeLast(); + if (visitor.containsOpaqueRootTriState(nextRoot) == TrueTriState) + continue; + visitor.addOpaqueRoot(nextRoot); + + auto appendOwnedObjects = [&] { + NSMapTable *ownedObjects = [externalObjectGraph objectForKey:static_cast(nextRoot)]; + for (id ownedObject in ownedObjects) + stack.append(static_cast(ownedObject)); + }; + + if (lockAcquired) + appendOwnedObjects(); + else { + auto locker = holdLock(externalDataMutex); + appendOwnedObjects(); + } + } + } +} + +void scanExternalObjectGraph(JSC::VM& vm, JSC::SlotVisitor& visitor, void* root) +{ + bool lockAcquired = false; + scanExternalObjectGraph(vm, visitor, root, lockAcquired); +} + +void scanExternalRememberedSet(JSC::VM& vm, JSC::SlotVisitor& visitor) +{ + @autoreleasepool { + JSVirtualMachine *virtualMachine = [JSVMWrapperCache wrapperForJSContextGroupRef:toRef(&vm)]; + if (!virtualMachine) + return; + Lock& externalDataMutex = [virtualMachine externalDataMutex]; + auto locker = holdLock(externalDataMutex); + NSMapTable *externalObjectGraph = [virtualMachine externalObjectGraph]; + NSMapTable *externalRememberedSet = [virtualMachine externalRememberedSet]; + for (id 
key in externalRememberedSet) { + NSMapTable *ownedObjects = [externalObjectGraph objectForKey:key]; + bool lockAcquired = true; + for (id ownedObject in ownedObjects) + scanExternalObjectGraph(vm, visitor, ownedObject, lockAcquired); + } + [externalRememberedSet removeAllObjects]; + } + + visitor.mergeIfNecessary(); +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/JSVirtualMachineInternal.h b/API/JSVirtualMachineInternal.h new file mode 100644 index 0000000..5ca9a7f --- /dev/null +++ b/API/JSVirtualMachineInternal.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2013, 2017 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSVirtualMachineInternal_h +#define JSVirtualMachineInternal_h + +#if JSC_OBJC_API_ENABLED + +#import + +namespace JSC { +class VM; +class SlotVisitor; +} + +#if defined(__OBJC__) +@class NSMapTable; + +@interface JSVirtualMachine(Internal) + +JSContextGroupRef getGroupFromVirtualMachine(JSVirtualMachine *); + ++ (JSVirtualMachine *)virtualMachineWithContextGroupRef:(JSContextGroupRef)group; + +- (JSContext *)contextForGlobalContextRef:(JSGlobalContextRef)globalContext; +- (void)addContext:(JSContext *)wrapper forGlobalContextRef:(JSGlobalContextRef)globalContext; + +@end +#endif // defined(__OBJC__) + +void scanExternalObjectGraph(JSC::VM&, JSC::SlotVisitor&, void* root); +void scanExternalRememberedSet(JSC::VM&, JSC::SlotVisitor&); + +#endif // JSC_OBJC_API_ENABLED + +#endif // JSVirtualMachineInternal_h diff --git a/API/JSVirtualMachinePrivate.h b/API/JSVirtualMachinePrivate.h new file mode 100644 index 0000000..3e5fd42 --- /dev/null +++ b/API/JSVirtualMachinePrivate.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2017 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JSVirtualMachinePrivate_h +#define JSVirtualMachinePrivate_h + +#if JSC_OBJC_API_ENABLED + +@interface JSVirtualMachine(Private) + +/*! + @method + @abstract Enables SIGILL crash analysis for all JSVirtualMachines. + @discussion Installs a SIGILL crash handler that will collect additional + non-user identifying information about the crash site via os_log_info. + */ +- (void)enableSigillCrashAnalyzer; + +@end + +#endif + +#endif // JSVirtualMachinePrivate_h diff --git a/API/JSWeakObjectMapRefInternal.h b/API/JSWeakObjectMapRefInternal.h new file mode 100644 index 0000000..9037947 --- /dev/null +++ b/API/JSWeakObjectMapRefInternal.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JSWeakObjectMapRefInternal_h +#define JSWeakObjectMapRefInternal_h + +#include "WeakGCMap.h" +#include + +namespace JSC { + +class JSObject; + +} + +typedef void (*JSWeakMapDestroyedCallback)(struct OpaqueJSWeakObjectMap*, void*); + +typedef JSC::WeakGCMap WeakMapType; + +struct OpaqueJSWeakObjectMap : public RefCounted { +public: + static Ref create(JSC::VM& vm, void* data, JSWeakMapDestroyedCallback callback) + { + return adoptRef(*new OpaqueJSWeakObjectMap(vm, data, callback)); + } + + WeakMapType& map() { return m_map; } + + ~OpaqueJSWeakObjectMap() + { + m_callback(this, m_data); + } + +private: + OpaqueJSWeakObjectMap(JSC::VM& vm, void* data, JSWeakMapDestroyedCallback callback) + : m_map(vm) + , m_data(data) + , m_callback(callback) + { + } + WeakMapType m_map; + void* m_data; + JSWeakMapDestroyedCallback m_callback; +}; + + +#endif // JSWeakObjectMapInternal_h diff --git a/API/JSWeakObjectMapRefPrivate.cpp b/API/JSWeakObjectMapRefPrivate.cpp new file mode 100644 index 0000000..9b825b0 --- /dev/null +++ b/API/JSWeakObjectMapRefPrivate.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "JSWeakObjectMapRefPrivate.h" + +#include "APICast.h" +#include "JSCJSValue.h" +#include "JSCallbackObject.h" +#include "JSWeakObjectMapRefInternal.h" +#include "JSCInlines.h" +#include "Weak.h" +#include "WeakGCMapInlines.h" + +using namespace WTF; +using namespace JSC; + +#ifdef __cplusplus +extern "C" { +#endif + +JSWeakObjectMapRef JSWeakObjectMapCreate(JSContextRef context, void* privateData, JSWeakMapDestroyedCallback callback) +{ + ExecState* exec = toJS(context); + JSLockHolder locker(exec); + RefPtr map = OpaqueJSWeakObjectMap::create(exec->vm(), privateData, callback); + exec->lexicalGlobalObject()->registerWeakMap(map.get()); + return map.get(); +} + +void JSWeakObjectMapSet(JSContextRef ctx, JSWeakObjectMapRef map, void* key, JSObjectRef object) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + JSObject* obj = toJS(object); + if (!obj) + return; + ASSERT(obj->inherits(JSProxy::info()) + || obj->inherits(JSCallbackObject::info()) + || obj->inherits(JSCallbackObject::info())); + map->map().set(key, obj); +} + +JSObjectRef JSWeakObjectMapGet(JSContextRef ctx, JSWeakObjectMapRef map, void* key) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return 0; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + return toRef(jsCast(map->map().get(key))); +} + +void JSWeakObjectMapRemove(JSContextRef ctx, JSWeakObjectMapRef map, void* key) +{ + if (!ctx) { + ASSERT_NOT_REACHED(); + return; + } + ExecState* exec = toJS(ctx); + JSLockHolder locker(exec); + map->map().remove(key); +} + +// We need to keep this function in the build to keep the nightlies running. +JS_EXPORT bool JSWeakObjectMapClear(JSContextRef, JSWeakObjectMapRef, void*, JSObjectRef); +bool JSWeakObjectMapClear(JSContextRef, JSWeakObjectMapRef, void*, JSObjectRef) +{ + return true; +} + +#ifdef __cplusplus +} +#endif diff --git a/API/JSWeakObjectMapRefPrivate.h b/API/JSWeakObjectMapRefPrivate.h new file mode 100644 index 0000000..a335e23 --- /dev/null +++ b/API/JSWeakObjectMapRefPrivate.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
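A minimal sketch of how an embedder might drive the weak-map API implemented above and declared in the header below; the wrapper class, key scheme, and callback names here are illustrative assumptions, not part of this patch:

#include <JavaScriptCore/JavaScriptCore.h>
#include "JSWeakObjectMapRefPrivate.h"

// Invoked when the owning global object goes away; 'data' is the privateData
// that was handed to JSWeakObjectMapCreate.
static void exampleMapDestroyed(JSWeakObjectMapRef map, void* data)
{
    (void)map;
    free(data);
}

static JSWeakObjectMapRef createExampleMap(JSContextRef ctx)
{
    return JSWeakObjectMapCreate(ctx, malloc(16), exampleMapDestroyed);
}

// Returns the cached wrapper for a native object if the collector has not reclaimed
// it yet; otherwise builds a new wrapper and records it weakly for next time.
static JSObjectRef exampleWrapperForNative(JSContextRef ctx, JSWeakObjectMapRef map, void* nativeKey, JSClassRef wrapperClass)
{
    JSObjectRef wrapper = JSWeakObjectMapGet(ctx, map, nativeKey);
    if (wrapper)
        return wrapper;
    wrapper = JSObjectMake(ctx, wrapperClass, nativeKey);
    JSWeakObjectMapSet(ctx, map, nativeKey, wrapper);
    return wrapper;
}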
+ */ + +#ifndef JSWeakObjectMapRefPrivate_h +#define JSWeakObjectMapRefPrivate_h + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! @typedef JSWeakObjectMapRef A weak map for storing JSObjectRefs */ +typedef struct OpaqueJSWeakObjectMap* JSWeakObjectMapRef; + +/*! + @typedef JSWeakMapDestroyedCallback + @abstract The callback invoked when a JSWeakObjectMapRef is being destroyed. + @param map The map that is being destroyed. + @param data The private data (if any) that was associated with the map instance. + */ +typedef void (*JSWeakMapDestroyedCallback)(JSWeakObjectMapRef map, void* data); + +/*! + @function + @abstract Creates a weak value map that can be used to reference user defined objects without preventing them from being collected. + @param ctx The execution context to use. + @param data A void* to set as the map's private data. Pass NULL to specify no private data. + @param destructor A function to call when the weak map is destroyed. + @result A JSWeakObjectMapRef bound to the given context, data and destructor. + @discussion The JSWeakObjectMapRef can be used as a storage mechanism to hold custom JS objects without forcing those objects to + remain live as JSValueProtect would. + */ +JS_EXPORT JSWeakObjectMapRef JSWeakObjectMapCreate(JSContextRef ctx, void* data, JSWeakMapDestroyedCallback destructor); + +/*! + @function + @abstract Associates a JSObjectRef with the given key in a JSWeakObjectMap. + @param ctx The execution context to use. + @param map The map to operate on. + @param key The key to associate a weak reference with. + @param object The user defined object to associate with the key. + */ +JS_EXPORT void JSWeakObjectMapSet(JSContextRef ctx, JSWeakObjectMapRef map, void* key, JSObjectRef object); + +/*! + @function + @abstract Retrieves the JSObjectRef associated with a key. + @param ctx The execution context to use. + @param map The map to query. + @param key The key to search for. + @result Either the live object associated with the provided key, or NULL. + */ +JS_EXPORT JSObjectRef JSWeakObjectMapGet(JSContextRef ctx, JSWeakObjectMapRef map, void* key); + +/*! + @function + @abstract Removes the entry for the given key if the key is present, otherwise it has no effect. + @param ctx The execution context to use. + @param map The map to use. + @param key The key to remove. + */ +JS_EXPORT void JSWeakObjectMapRemove(JSContextRef ctx, JSWeakObjectMapRef map, void* key); + +#ifdef __cplusplus +} +#endif + +#endif // JSWeakObjectMapPrivate_h diff --git a/API/JSWrapperMap.h b/API/JSWrapperMap.h new file mode 100644 index 0000000..c6aa1af --- /dev/null +++ b/API/JSWrapperMap.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import +#import + +#if JSC_OBJC_API_ENABLED + +@interface JSWrapperMap : NSObject + +- (id)initWithContext:(JSContext *)context; + +- (JSValue *)jsWrapperForObject:(id)object; + +- (JSValue *)objcWrapperForJSValueRef:(JSValueRef)value; + +@end + +id tryUnwrapObjcObject(JSGlobalContextRef, JSValueRef); + +bool supportsInitMethodConstructors(); +Protocol *getJSExportProtocol(); +Class getNSBlockClass(); + +#endif diff --git a/API/JSWrapperMap.mm b/API/JSWrapperMap.mm new file mode 100644 index 0000000..8046188 --- /dev/null +++ b/API/JSWrapperMap.mm @@ -0,0 +1,691 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#import "JavaScriptCore.h" + +#if JSC_OBJC_API_ENABLED +#import "APICast.h" +#import "JSAPIWrapperObject.h" +#import "JSCInlines.h" +#import "JSCallbackObject.h" +#import "JSContextInternal.h" +#import "JSWrapperMap.h" +#import "ObjCCallbackFunction.h" +#import "ObjcRuntimeExtras.h" +#import "WeakGCMap.h" +#import "WeakGCMapInlines.h" +#import +#import +#import + +#include + +#if PLATFORM(APPLETV) +#else +static const int32_t firstJavaScriptCoreVersionWithInitConstructorSupport = 0x21A0400; // 538.4.0 +#if PLATFORM(IOS) +static const uint32_t firstSDKVersionWithInitConstructorSupport = DYLD_IOS_VERSION_10_0; +#elif PLATFORM(MAC) +static const uint32_t firstSDKVersionWithInitConstructorSupport = 0xA0A00; // OSX 10.10.0 +#endif +#endif + +@class JSObjCClassInfo; + +@interface JSWrapperMap () + +- (JSObjCClassInfo*)classInfoForClass:(Class)cls; + +@end + +// Default conversion of selectors to property names. +// All semicolons are removed, lowercase letters following a semicolon are capitalized. 
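// For example, absent a rename map entry (see createRenameMap() below), the conversion here,
// which keys off the colons in the selector, yields:
//   "description"       -> "description"
//   "setHidden:"        -> "setHidden"
//   "doFoo:withBar:"    -> "doFooWithBar"
//   "initWithName:age:" -> "initWithNameAge"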
+static NSString *selectorToPropertyName(const char* start) +{ + // Use 'index' to check for colons, if there are none, this is easy! + const char* firstColon = strchr(start, ':'); + if (!firstColon) + return [NSString stringWithUTF8String:start]; + + // 'header' is the length of string up to the first colon. + size_t header = firstColon - start; + // The new string needs to be long enough to hold 'header', plus the remainder of the string, excluding + // at least one ':', but including a '\0'. (This is conservative if there are more than one ':'). + char* buffer = static_cast(malloc(header + strlen(firstColon + 1) + 1)); + // Copy 'header' characters, set output to point to the end of this & input to point past the first ':'. + memcpy(buffer, start, header); + char* output = buffer + header; + const char* input = start + header + 1; + + // On entry to the loop, we have already skipped over a ':' from the input. + while (true) { + char c; + // Skip over any additional ':'s. We'll leave c holding the next character after the + // last ':', and input pointing past c. + while ((c = *(input++)) == ':'); + // Copy the character, converting to upper case if necessary. + // If the character we copy is '\0', then we're done! + if (!(*(output++) = toupper(c))) + goto done; + // Loop over characters other than ':'. + while ((c = *(input++)) != ':') { + // Copy the character. + // If the character we copy is '\0', then we're done! + if (!(*(output++) = c)) + goto done; + } + // If we get here, we've consumed a ':' - wash, rinse, repeat. + } +done: + NSString *result = [NSString stringWithUTF8String:buffer]; + free(buffer); + return result; +} + +static bool constructorHasInstance(JSContextRef ctx, JSObjectRef constructorRef, JSValueRef possibleInstance, JSValueRef*) +{ + JSC::ExecState* exec = toJS(ctx); + JSC::JSLockHolder locker(exec); + + JSC::JSObject* constructor = toJS(constructorRef); + JSC::JSValue instance = toJS(exec, possibleInstance); + return JSC::JSObject::defaultHasInstance(exec, instance, constructor->get(exec, exec->propertyNames().prototype)); +} + +static JSC::JSObject* makeWrapper(JSContextRef ctx, JSClassRef jsClass, id wrappedObject) +{ + JSC::ExecState* exec = toJS(ctx); + JSC::JSLockHolder locker(exec); + + ASSERT(jsClass); + JSC::JSCallbackObject* object = JSC::JSCallbackObject::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->objcWrapperObjectStructure(), jsClass, 0); + object->setWrappedObject(wrappedObject); + if (JSC::JSObject* prototype = jsClass->prototype(exec)) + object->setPrototypeDirect(exec->vm(), prototype); + + return object; +} + +// Make an object that is in all ways a completely vanilla JavaScript object, +// other than that it has a native brand set that will be displayed by the default +// Object.prototype.toString conversion. 
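// For instance, a prototype object created this way with the brand "MyWidgetPrototype" (an
// illustrative name following the "%sPrototype" convention used later in this file) reports
// itself as "[object MyWidgetPrototype]" from Object.prototype.toString.call(o) in script.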
+static JSC::JSObject *objectWithCustomBrand(JSContext *context, NSString *brand, Class cls = 0) +{ + JSClassDefinition definition; + definition = kJSClassDefinitionEmpty; + definition.className = [brand UTF8String]; + JSClassRef classRef = JSClassCreate(&definition); + JSC::JSObject* result = makeWrapper([context JSGlobalContextRef], classRef, cls); + JSClassRelease(classRef); + return result; +} + +static JSC::JSObject *constructorWithCustomBrand(JSContext *context, NSString *brand, Class cls) +{ + JSClassDefinition definition; + definition = kJSClassDefinitionEmpty; + definition.className = [brand UTF8String]; + definition.hasInstance = constructorHasInstance; + JSClassRef classRef = JSClassCreate(&definition); + JSC::JSObject* result = makeWrapper([context JSGlobalContextRef], classRef, cls); + JSClassRelease(classRef); + return result; +} + +// Look for @optional properties in the prototype containing a selector to property +// name mapping, separated by a __JS_EXPORT_AS__ delimiter. +static NSMutableDictionary *createRenameMap(Protocol *protocol, BOOL isInstanceMethod) +{ + NSMutableDictionary *renameMap = [[NSMutableDictionary alloc] init]; + + forEachMethodInProtocol(protocol, NO, isInstanceMethod, ^(SEL sel, const char*){ + NSString *rename = @(sel_getName(sel)); + NSRange range = [rename rangeOfString:@"__JS_EXPORT_AS__"]; + if (range.location == NSNotFound) + return; + NSString *selector = [rename substringToIndex:range.location]; + NSUInteger begin = range.location + range.length; + NSUInteger length = [rename length] - begin - 1; + NSString *name = [rename substringWithRange:(NSRange){ begin, length }]; + renameMap[selector] = name; + }); + + return renameMap; +} + +inline void putNonEnumerable(JSValue *base, NSString *propertyName, JSValue *value) +{ + [base defineProperty:propertyName descriptor:@{ + JSPropertyDescriptorValueKey: value, + JSPropertyDescriptorWritableKey: @YES, + JSPropertyDescriptorEnumerableKey: @NO, + JSPropertyDescriptorConfigurableKey: @YES + }]; +} + +static bool isInitFamilyMethod(NSString *name) +{ + NSUInteger i = 0; + + // Skip over initial underscores. + for (; i < [name length]; ++i) { + if ([name characterAtIndex:i] != '_') + break; + } + + // Match 'init'. + NSUInteger initIndex = 0; + NSString* init = @"init"; + for (; i < [name length] && initIndex < [init length]; ++i, ++initIndex) { + if ([name characterAtIndex:i] != [init characterAtIndex:initIndex]) + return false; + } + + // We didn't match all of 'init'. + if (initIndex < [init length]) + return false; + + // If we're at the end or the next character is a capital letter then this is an init-family selector. + return i == [name length] || [[NSCharacterSet uppercaseLetterCharacterSet] characterIsMember:[name characterAtIndex:i]]; +} + +static bool shouldSkipMethodWithName(NSString *name) +{ + // For clients that don't support init-based constructors just copy + // over the init method as we would have before. + if (!supportsInitMethodConstructors()) + return false; + + // Skip over init family methods because we handle those specially + // for the purposes of hooking up the constructor correctly. + return isInitFamilyMethod(name); +} + +// This method will iterate over the set of required methods in the protocol, and: +// * Determine a property name (either via a renameMap or default conversion). +// * If an accessorMap is provided, and contains this name, store the method in the map. +// * Otherwise, if the object doesn't already contain a property with name, create it. 
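// Illustrative example of the rename mechanism above (assuming the usual JSExportAs() macro from
// JSExport.h; the selector and property names are made up): declaring
//   JSExportAs(makePoint, - (id)makePointWithX:(double)x y:(double)y);
// in an exported protocol adds an @optional marker selector that reads roughly like
//   makePointWithX:y:__JS_EXPORT_AS__makePoint:
// which createRenameMap() turns into the entry @"makePointWithX:y:" -> @"makePoint", so script
// sees the method as "makePoint" instead of the default "makePointWithXY".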
+static void copyMethodsToObject(JSContext *context, Class objcClass, Protocol *protocol, BOOL isInstanceMethod, JSValue *object, NSMutableDictionary *accessorMethods = nil) +{ + NSMutableDictionary *renameMap = createRenameMap(protocol, isInstanceMethod); + + forEachMethodInProtocol(protocol, YES, isInstanceMethod, ^(SEL sel, const char* types){ + const char* nameCStr = sel_getName(sel); + NSString *name = @(nameCStr); + + if (shouldSkipMethodWithName(name)) + return; + + if (accessorMethods && accessorMethods[name]) { + JSObjectRef method = objCCallbackFunctionForMethod(context, objcClass, protocol, isInstanceMethod, sel, types); + if (!method) + return; + accessorMethods[name] = [JSValue valueWithJSValueRef:method inContext:context]; + } else { + name = renameMap[name]; + if (!name) + name = selectorToPropertyName(nameCStr); + if ([object hasProperty:name]) + return; + JSObjectRef method = objCCallbackFunctionForMethod(context, objcClass, protocol, isInstanceMethod, sel, types); + if (method) + putNonEnumerable(object, name, [JSValue valueWithJSValueRef:method inContext:context]); + } + }); + + [renameMap release]; +} + +static bool parsePropertyAttributes(objc_property_t property, char*& getterName, char*& setterName) +{ + bool readonly = false; + unsigned attributeCount; + objc_property_attribute_t* attributes = property_copyAttributeList(property, &attributeCount); + if (attributeCount) { + for (unsigned i = 0; i < attributeCount; ++i) { + switch (*(attributes[i].name)) { + case 'G': + getterName = strdup(attributes[i].value); + break; + case 'S': + setterName = strdup(attributes[i].value); + break; + case 'R': + readonly = true; + break; + default: + break; + } + } + free(attributes); + } + return readonly; +} + +static char* makeSetterName(const char* name) +{ + size_t nameLength = strlen(name); + char* setterName = (char*)malloc(nameLength + 5); // "set" Name ":\0" + setterName[0] = 's'; + setterName[1] = 'e'; + setterName[2] = 't'; + setterName[3] = toupper(*name); + memcpy(setterName + 4, name + 1, nameLength - 1); + setterName[nameLength + 3] = ':'; + setterName[nameLength + 4] = '\0'; + return setterName; +} + +static void copyPrototypeProperties(JSContext *context, Class objcClass, Protocol *protocol, JSValue *prototypeValue) +{ + // First gather propreties into this list, then handle the methods (capturing the accessor methods). + struct Property { + const char* name; + char* getterName; + char* setterName; + }; + __block Vector propertyList; + + // Map recording the methods used as getters/setters. + NSMutableDictionary *accessorMethods = [NSMutableDictionary dictionary]; + + // Useful value. + JSValue *undefined = [JSValue valueWithUndefinedInContext:context]; + + forEachPropertyInProtocol(protocol, ^(objc_property_t property){ + char* getterName = 0; + char* setterName = 0; + bool readonly = parsePropertyAttributes(property, getterName, setterName); + const char* name = property_getName(property); + + // Add the names of the getter & setter methods to + if (!getterName) + getterName = strdup(name); + accessorMethods[@(getterName)] = undefined; + if (!readonly) { + if (!setterName) + setterName = makeSetterName(name); + accessorMethods[@(setterName)] = undefined; + } + + // Add the properties to a list. + propertyList.append((Property){ name, getterName, setterName }); + }); + + // Copy methods to the prototype, capturing accessors in the accessorMethods map. 
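    // At this point accessorMethods maps each expected accessor selector name (for example
    // @"foo" and @"setFoo:" for a read-write property "foo") to the placeholder undefined value;
    // copyMethodsToObject() below swaps those placeholders for the wrapped Objective-C methods
    // rather than installing them as ordinary prototype functions.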
+ copyMethodsToObject(context, objcClass, protocol, YES, prototypeValue, accessorMethods); + + // Iterate the propertyList & generate accessor properties. + for (size_t i = 0; i < propertyList.size(); ++i) { + Property& property = propertyList[i]; + + JSValue *getter = accessorMethods[@(property.getterName)]; + free(property.getterName); + ASSERT(![getter isUndefined]); + + JSValue *setter = undefined; + if (property.setterName) { + setter = accessorMethods[@(property.setterName)]; + free(property.setterName); + ASSERT(![setter isUndefined]); + } + + [prototypeValue defineProperty:@(property.name) descriptor:@{ + JSPropertyDescriptorGetKey: getter, + JSPropertyDescriptorSetKey: setter, + JSPropertyDescriptorEnumerableKey: @NO, + JSPropertyDescriptorConfigurableKey: @YES + }]; + } +} + +@interface JSObjCClassInfo : NSObject { + JSContext *m_context; + Class m_class; + bool m_block; + JSClassRef m_classRef; + JSC::Weak m_prototype; + JSC::Weak m_constructor; +} + +- (id)initWithContext:(JSContext *)context forClass:(Class)cls; +- (JSC::JSObject *)wrapperForObject:(id)object; +- (JSC::JSObject *)constructor; +- (JSC::JSObject *)prototype; + +@end + +@implementation JSObjCClassInfo + +- (id)initWithContext:(JSContext *)context forClass:(Class)cls +{ + self = [super init]; + if (!self) + return nil; + + const char* className = class_getName(cls); + m_context = context; + m_class = cls; + m_block = [cls isSubclassOfClass:getNSBlockClass()]; + JSClassDefinition definition; + definition = kJSClassDefinitionEmpty; + definition.className = className; + m_classRef = JSClassCreate(&definition); + + return self; +} + +- (void)dealloc +{ + JSClassRelease(m_classRef); + [super dealloc]; +} + +static JSC::JSObject* allocateConstructorForCustomClass(JSContext *context, const char* className, Class cls) +{ + if (!supportsInitMethodConstructors()) + return constructorWithCustomBrand(context, [NSString stringWithFormat:@"%sConstructor", className], cls); + + // For each protocol that the class implements, gather all of the init family methods into a hash table. + __block HashMap initTable; + Protocol *exportProtocol = getJSExportProtocol(); + for (Class currentClass = cls; currentClass; currentClass = class_getSuperclass(currentClass)) { + forEachProtocolImplementingProtocol(currentClass, exportProtocol, ^(Protocol *protocol) { + forEachMethodInProtocol(protocol, YES, YES, ^(SEL selector, const char*) { + const char* name = sel_getName(selector); + if (!isInitFamilyMethod(@(name))) + return; + initTable.set(name, protocol); + }); + }); + } + + for (Class currentClass = cls; currentClass; currentClass = class_getSuperclass(currentClass)) { + __block unsigned numberOfInitsFound = 0; + __block SEL initMethod = 0; + __block Protocol *initProtocol = 0; + __block const char* types = 0; + forEachMethodInClass(currentClass, ^(Method method) { + SEL selector = method_getName(method); + const char* name = sel_getName(selector); + auto iter = initTable.find(name); + + if (iter == initTable.end()) + return; + + numberOfInitsFound++; + initMethod = selector; + initProtocol = iter->value; + types = method_getTypeEncoding(method); + }); + + if (!numberOfInitsFound) + continue; + + if (numberOfInitsFound > 1) { + NSLog(@"ERROR: Class %@ exported more than one init family method via JSExport. 
Class %@ will not have a callable JavaScript constructor function.", cls, cls); + break; + } + + JSObjectRef method = objCCallbackFunctionForInit(context, cls, initProtocol, initMethod, types); + return toJS(method); + } + return constructorWithCustomBrand(context, [NSString stringWithFormat:@"%sConstructor", className], cls); +} + +typedef std::pair ConstructorPrototypePair; + +- (ConstructorPrototypePair)allocateConstructorAndPrototype +{ + JSObjCClassInfo* superClassInfo = [m_context.wrapperMap classInfoForClass:class_getSuperclass(m_class)]; + + ASSERT(!m_constructor || !m_prototype); + ASSERT((m_class == [NSObject class]) == !superClassInfo); + + JSC::JSObject* jsPrototype = m_prototype.get(); + JSC::JSObject* jsConstructor = m_constructor.get(); + + if (!superClassInfo) { + JSContextRef cContext = [m_context JSGlobalContextRef]; + JSValue *constructor = m_context[@"Object"]; + if (!jsConstructor) + jsConstructor = toJS(JSValueToObject(cContext, valueInternalValue(constructor), 0)); + + if (!jsPrototype) { + JSValue *prototype = constructor[@"prototype"]; + jsPrototype = toJS(JSValueToObject(cContext, valueInternalValue(prototype), 0)); + } + } else { + const char* className = class_getName(m_class); + + // Create or grab the prototype/constructor pair. + if (!jsPrototype) + jsPrototype = objectWithCustomBrand(m_context, [NSString stringWithFormat:@"%sPrototype", className]); + + if (!jsConstructor) + jsConstructor = allocateConstructorForCustomClass(m_context, className, m_class); + + JSValue* prototype = [JSValue valueWithJSValueRef:toRef(jsPrototype) inContext:m_context]; + JSValue* constructor = [JSValue valueWithJSValueRef:toRef(jsConstructor) inContext:m_context]; + putNonEnumerable(prototype, @"constructor", constructor); + putNonEnumerable(constructor, @"prototype", prototype); + + Protocol *exportProtocol = getJSExportProtocol(); + forEachProtocolImplementingProtocol(m_class, exportProtocol, ^(Protocol *protocol){ + copyPrototypeProperties(m_context, m_class, protocol, prototype); + copyMethodsToObject(m_context, m_class, protocol, NO, constructor); + }); + + // Set [Prototype]. 
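        // The net effect is a prototype chain mirroring the Objective-C class hierarchy; for an
        // assumed exported class MyWidget inheriting directly from NSObject, a wrapper's chain is
        // wrapper -> MyWidgetPrototype -> Object.prototype (NSObject is mapped to Object above).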
+ JSC::JSObject* superClassPrototype = [superClassInfo prototype]; + JSObjectSetPrototype([m_context JSGlobalContextRef], toRef(jsPrototype), toRef(superClassPrototype)); + } + + m_prototype = jsPrototype; + m_constructor = jsConstructor; + return ConstructorPrototypePair(jsConstructor, jsPrototype); +} + +- (JSC::JSObject*)wrapperForObject:(id)object +{ + ASSERT([object isKindOfClass:m_class]); + ASSERT(m_block == [object isKindOfClass:getNSBlockClass()]); + if (m_block) { + if (JSObjectRef method = objCCallbackFunctionForBlock(m_context, object)) { + JSValue *constructor = [JSValue valueWithJSValueRef:method inContext:m_context]; + JSValue *prototype = [JSValue valueWithNewObjectInContext:m_context]; + putNonEnumerable(constructor, @"prototype", prototype); + putNonEnumerable(prototype, @"constructor", constructor); + return toJS(method); + } + } + + JSC::JSObject* prototype = [self prototype]; + + JSC::JSObject* wrapper = makeWrapper([m_context JSGlobalContextRef], m_classRef, object); + JSObjectSetPrototype([m_context JSGlobalContextRef], toRef(wrapper), toRef(prototype)); + return wrapper; +} + +- (JSC::JSObject*)constructor +{ + JSC::JSObject* constructor = m_constructor.get(); + if (!constructor) + constructor = [self allocateConstructorAndPrototype].first; + ASSERT(!!constructor); + return constructor; +} + +- (JSC::JSObject*)prototype +{ + JSC::JSObject* prototype = m_prototype.get(); + if (!prototype) + prototype = [self allocateConstructorAndPrototype].second; + ASSERT(!!prototype); + return prototype; +} + +@end + +@implementation JSWrapperMap { + JSContext *m_context; + NSMutableDictionary *m_classMap; + std::unique_ptr> m_cachedJSWrappers; + NSMapTable *m_cachedObjCWrappers; +} + +- (id)initWithContext:(JSContext *)context +{ + self = [super init]; + if (!self) + return nil; + + NSPointerFunctionsOptions keyOptions = NSPointerFunctionsOpaqueMemory | NSPointerFunctionsOpaquePersonality; + NSPointerFunctionsOptions valueOptions = NSPointerFunctionsWeakMemory | NSPointerFunctionsObjectPersonality; + m_cachedObjCWrappers = [[NSMapTable alloc] initWithKeyOptions:keyOptions valueOptions:valueOptions capacity:0]; + + m_cachedJSWrappers = std::make_unique>(toJS([context JSGlobalContextRef])->vm()); + + m_context = context; + m_classMap = [[NSMutableDictionary alloc] init]; + return self; +} + +- (void)dealloc +{ + [m_cachedObjCWrappers release]; + [m_classMap release]; + [super dealloc]; +} + +- (JSObjCClassInfo*)classInfoForClass:(Class)cls +{ + if (!cls) + return nil; + + // Check if we've already created a JSObjCClassInfo for this Class. + if (JSObjCClassInfo* classInfo = (JSObjCClassInfo*)m_classMap[cls]) + return classInfo; + + // Skip internal classes beginning with '_' - just copy link to the parent class's info. 
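    // For example, a runtime-generated or class-cluster subclass whose name begins with '_'
    // (say, a hypothetical "_MyWidgetInternal") shares the JSObjCClassInfo of its nearest
    // public superclass rather than receiving a prototype/constructor pair of its own.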
+ if ('_' == *class_getName(cls)) + return m_classMap[cls] = [self classInfoForClass:class_getSuperclass(cls)]; + + return m_classMap[cls] = [[[JSObjCClassInfo alloc] initWithContext:m_context forClass:cls] autorelease]; +} + +- (JSValue *)jsWrapperForObject:(id)object +{ + JSC::JSObject* jsWrapper = m_cachedJSWrappers->get(object); + if (jsWrapper) + return [JSValue valueWithJSValueRef:toRef(jsWrapper) inContext:m_context]; + + if (class_isMetaClass(object_getClass(object))) + jsWrapper = [[self classInfoForClass:(Class)object] constructor]; + else { + JSObjCClassInfo* classInfo = [self classInfoForClass:[object class]]; + jsWrapper = [classInfo wrapperForObject:object]; + } + + // FIXME: https://bugs.webkit.org/show_bug.cgi?id=105891 + // This general approach to wrapper caching is pretty effective, but there are a couple of problems: + // (1) For immortal objects JSValues will effectively leak and this results in error output being logged - we should avoid adding associated objects to immortal objects. + // (2) A long lived object may rack up many JSValues. When the contexts are released these will unprotect the associated JavaScript objects, + // but still, would probably nicer if we made it so that only one associated object was required, broadcasting object dealloc. + m_cachedJSWrappers->set(object, jsWrapper); + return [JSValue valueWithJSValueRef:toRef(jsWrapper) inContext:m_context]; +} + +- (JSValue *)objcWrapperForJSValueRef:(JSValueRef)value +{ + JSValue *wrapper = static_cast(NSMapGet(m_cachedObjCWrappers, value)); + if (!wrapper) { + wrapper = [[[JSValue alloc] initWithValue:value inContext:m_context] autorelease]; + NSMapInsert(m_cachedObjCWrappers, value, wrapper); + } + return wrapper; +} + +@end + +id tryUnwrapObjcObject(JSGlobalContextRef context, JSValueRef value) +{ + if (!JSValueIsObject(context, value)) + return nil; + JSValueRef exception = 0; + JSObjectRef object = JSValueToObject(context, value, &exception); + ASSERT(!exception); + JSC::JSLockHolder locker(toJS(context)); + if (toJS(object)->inherits(JSC::JSCallbackObject::info())) + return (id)JSC::jsCast(toJS(object))->wrappedObject(); + if (id target = tryUnwrapConstructor(object)) + return target; + return nil; +} + +// This class ensures that the JSExport protocol is registered with the runtime. +NS_ROOT_CLASS @interface JSExport +@end +@implementation JSExport +@end + +bool supportsInitMethodConstructors() +{ +#if PLATFORM(APPLETV) + // There are no old clients on Apple TV, so there's no need for backwards compatibility. + return true; +#else + // First check to see the version of JavaScriptCore we directly linked against. + static int32_t versionOfLinkTimeJavaScriptCore = 0; + if (!versionOfLinkTimeJavaScriptCore) + versionOfLinkTimeJavaScriptCore = NSVersionOfLinkTimeLibrary("JavaScriptCore"); + // Only do the link time version comparison if we linked directly with JavaScriptCore + if (versionOfLinkTimeJavaScriptCore != -1) + return versionOfLinkTimeJavaScriptCore >= firstJavaScriptCoreVersionWithInitConstructorSupport; + + // If we didn't link directly with JavaScriptCore, + // base our check on what SDK was used to build the application. 
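    // dyld_get_program_sdk_version() reports the SDK as a packed integer using the same encoding
    // as the firstSDKVersionWithInitConstructorSupport constants above (e.g. 0xA0A00 == 10.10.0
    // on Mac), so a plain numeric comparison suffices.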
+ static uint32_t programSDKVersion = 0; + if (!programSDKVersion) + programSDKVersion = dyld_get_program_sdk_version(); + + return programSDKVersion >= firstSDKVersionWithInitConstructorSupport; +#endif +} + +Protocol *getJSExportProtocol() +{ + static Protocol *protocol = objc_getProtocol("JSExport"); + return protocol; +} + +Class getNSBlockClass() +{ + static Class cls = objc_getClass("NSBlock"); + return cls; +} + +#endif diff --git a/API/JavaScript.h b/API/JavaScript.h new file mode 100644 index 0000000..251e393 --- /dev/null +++ b/API/JavaScript.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * Copyright (C) 2008 Alp Toker + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JavaScript_h +#define JavaScript_h + +#include +#include +#include +#include +#include +#include + +#endif /* JavaScript_h */ diff --git a/API/JavaScriptCore.h b/API/JavaScriptCore.h new file mode 100644 index 0000000..b2fde1d --- /dev/null +++ b/API/JavaScriptCore.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JavaScriptCore_h +#define JavaScriptCore_h + +#include +#include + +#if defined(__OBJC__) && JSC_OBJC_API_ENABLED + +#import "JSContext.h" +#import "JSValue.h" +#import "JSManagedValue.h" +#import "JSVirtualMachine.h" +#import "JSExport.h" + +#endif + +#endif /* JavaScriptCore_h */ diff --git a/API/ObjCCallbackFunction.h b/API/ObjCCallbackFunction.h new file mode 100644 index 0000000..1c0995e --- /dev/null +++ b/API/ObjCCallbackFunction.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2013, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef ObjCCallbackFunction_h +#define ObjCCallbackFunction_h + +#include + +#if JSC_OBJC_API_ENABLED + +#import + +#if defined(__OBJC__) +JSObjectRef objCCallbackFunctionForMethod(JSContext *, Class, Protocol *, BOOL isInstanceMethod, SEL, const char* types); +JSObjectRef objCCallbackFunctionForBlock(JSContext *, id); +JSObjectRef objCCallbackFunctionForInit(JSContext *, Class, Protocol *, SEL, const char* types); + +id tryUnwrapConstructor(JSObjectRef); +#endif + +namespace JSC { + +class ObjCCallbackFunctionImpl; + +class ObjCCallbackFunction : public InternalFunction { + friend struct APICallbackFunction; +public: + typedef InternalFunction Base; + + static ObjCCallbackFunction* create(VM&, JSGlobalObject*, const String& name, std::unique_ptr); + static void destroy(JSCell*); + + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype) + { + ASSERT(globalObject); + return Structure::create(vm, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), info()); + } + + DECLARE_EXPORT_INFO; + + ObjCCallbackFunctionImpl* impl() const { return m_impl.get(); } + +protected: + ObjCCallbackFunction(VM&, Structure*, JSObjectCallAsFunctionCallback, JSObjectCallAsConstructorCallback, std::unique_ptr); + +private: + static CallType getCallData(JSCell*, CallData&); + static ConstructType getConstructData(JSCell*, ConstructData&); + + JSObjectCallAsFunctionCallback functionCallback() { return m_functionCallback; } + JSObjectCallAsConstructorCallback constructCallback() { return m_constructCallback; } + + JSObjectCallAsFunctionCallback m_functionCallback; + JSObjectCallAsConstructorCallback m_constructCallback; + std::unique_ptr m_impl; +}; + +} // namespace JSC + +#endif + +#endif // ObjCCallbackFunction_h diff --git a/API/ObjCCallbackFunction.mm b/API/ObjCCallbackFunction.mm new file mode 100644 index 0000000..ffdc0d4 --- /dev/null +++ b/API/ObjCCallbackFunction.mm @@ -0,0 +1,728 @@ +/* + * Copyright (C) 2013, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
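ObjCCallbackFunction is the JSCell that ultimately backs Objective-C blocks and methods exposed to script. A small sketch of the public-API behaviour this enables (the property name, block, and script are illustrative, not taken from this patch):

#import <JavaScriptCore/JavaScriptCore.h>

static void callbackFunctionDemo(void)
{
    JSContext *context = [[JSContext alloc] init];

    // Assigning a block to a context property wraps it (via objCCallbackFunctionForBlock)
    // so that script can call straight back into Objective-C.
    context[@"makeGreeting"] = ^NSString *(NSString *name) {
        return [NSString stringWithFormat:@"Hello, %@", name];
    };

    JSValue *greeting = [context evaluateScript:@"makeGreeting('World')"];
    NSLog(@"%@", [greeting toString]); // Hello, World
}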
+ */ + +#include "config.h" +#import "JavaScriptCore.h" + +#if JSC_OBJC_API_ENABLED + +#import "APICallbackFunction.h" +#import "APICast.h" +#import "Error.h" +#import "JSCell.h" +#import "JSCInlines.h" +#import "JSContextInternal.h" +#import "JSWrapperMap.h" +#import "JSValueInternal.h" +#import "ObjCCallbackFunction.h" +#import "ObjcRuntimeExtras.h" +#import "StructureInlines.h" +#import +#import + +class CallbackArgument { + WTF_MAKE_FAST_ALLOCATED; +public: + virtual ~CallbackArgument(); + virtual void set(NSInvocation *, NSInteger, JSContext *, JSValueRef, JSValueRef*) = 0; + + std::unique_ptr m_next; +}; + +CallbackArgument::~CallbackArgument() +{ +} + +class CallbackArgumentBoolean : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef*) override + { + bool value = JSValueToBoolean([context JSGlobalContextRef], argument); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +template +class CallbackArgumentInteger : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + T value = (T)JSC::toInt32(JSValueToNumber([context JSGlobalContextRef], argument, exception)); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +template +class CallbackArgumentDouble : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + T value = (T)JSValueToNumber([context JSGlobalContextRef], argument, exception); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentJSValue : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef*) override + { + JSValue *value = [JSValue valueWithJSValueRef:argument inContext:context]; + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentId : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef*) override + { + id value = valueToObject(context, argument); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentOfClass : public CallbackArgument { +public: + CallbackArgumentOfClass(Class cls) + : m_class(cls) + { + } + +private: + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + JSGlobalContextRef contextRef = [context JSGlobalContextRef]; + + id object = tryUnwrapObjcObject(contextRef, argument); + if (object && [object isKindOfClass:m_class.get()]) { + [invocation setArgument:&object atIndex:argumentNumber]; + return; + } + + if (JSValueIsNull(contextRef, argument) || JSValueIsUndefined(contextRef, argument)) { + object = nil; + [invocation setArgument:&object atIndex:argumentNumber]; + return; + } + + *exception = toRef(JSC::createTypeError(toJS(contextRef), ASCIILiteral("Argument does not match Objective-C Class"))); + } + + RetainPtr m_class; +}; + +class CallbackArgumentNSNumber : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + id value = valueToNumber([context JSGlobalContextRef], argument, exception); + [invocation setArgument:&value 
atIndex:argumentNumber]; + } +}; + +class CallbackArgumentNSString : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + id value = valueToString([context JSGlobalContextRef], argument, exception); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentNSDate : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + id value = valueToDate([context JSGlobalContextRef], argument, exception); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentNSArray : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + id value = valueToArray([context JSGlobalContextRef], argument, exception); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentNSDictionary : public CallbackArgument { + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef* exception) override + { + id value = valueToDictionary([context JSGlobalContextRef], argument, exception); + [invocation setArgument:&value atIndex:argumentNumber]; + } +}; + +class CallbackArgumentStruct : public CallbackArgument { +public: + CallbackArgumentStruct(NSInvocation *conversionInvocation, const char* encodedType) + : m_conversionInvocation(conversionInvocation) + , m_buffer(encodedType) + { + } + +private: + void set(NSInvocation *invocation, NSInteger argumentNumber, JSContext *context, JSValueRef argument, JSValueRef*) override + { + JSValue *value = [JSValue valueWithJSValueRef:argument inContext:context]; + [m_conversionInvocation invokeWithTarget:value]; + [m_conversionInvocation getReturnValue:m_buffer]; + [invocation setArgument:m_buffer atIndex:argumentNumber]; + } + + RetainPtr m_conversionInvocation; + StructBuffer m_buffer; +}; + +class ArgumentTypeDelegate { +public: + typedef std::unique_ptr ResultType; + + template + static ResultType typeInteger() + { + return std::make_unique>(); + } + + template + static ResultType typeDouble() + { + return std::make_unique>(); + } + + static ResultType typeBool() + { + return std::make_unique(); + } + + static ResultType typeVoid() + { + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; + } + + static ResultType typeId() + { + return std::make_unique(); + } + + static ResultType typeOfClass(const char* begin, const char* end) + { + StringRange copy(begin, end); + Class cls = objc_getClass(copy); + if (!cls) + return nullptr; + + if (cls == [JSValue class]) + return std::make_unique(); + if (cls == [NSString class]) + return std::make_unique(); + if (cls == [NSNumber class]) + return std::make_unique(); + if (cls == [NSDate class]) + return std::make_unique(); + if (cls == [NSArray class]) + return std::make_unique(); + if (cls == [NSDictionary class]) + return std::make_unique(); + + return std::make_unique(cls); + } + + static ResultType typeBlock(const char*, const char*) + { + return nullptr; + } + + static ResultType typeStruct(const char* begin, const char* end) + { + StringRange copy(begin, end); + if (NSInvocation *invocation = valueToTypeInvocationFor(copy)) + return std::make_unique(invocation, copy); + return nullptr; + } +}; + +class CallbackResult { + WTF_MAKE_FAST_ALLOCATED; +public: + 
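    // The mirror image of CallbackArgument: where a CallbackArgument marshals an incoming
    // JSValueRef into an NSInvocation argument slot, a CallbackResult reads the invocation's
    // return value back out and converts it into a JSValueRef for the JavaScript caller.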
virtual ~CallbackResult() + { + } + + virtual JSValueRef get(NSInvocation *, JSContext *, JSValueRef*) = 0; +}; + +class CallbackResultVoid : public CallbackResult { + JSValueRef get(NSInvocation *, JSContext *context, JSValueRef*) override + { + return JSValueMakeUndefined([context JSGlobalContextRef]); + } +}; + +class CallbackResultId : public CallbackResult { + JSValueRef get(NSInvocation *invocation, JSContext *context, JSValueRef*) override + { + id value; + [invocation getReturnValue:&value]; + return objectToValue(context, value); + } +}; + +template +class CallbackResultNumeric : public CallbackResult { + JSValueRef get(NSInvocation *invocation, JSContext *context, JSValueRef*) override + { + T value; + [invocation getReturnValue:&value]; + return JSValueMakeNumber([context JSGlobalContextRef], value); + } +}; + +class CallbackResultBoolean : public CallbackResult { + JSValueRef get(NSInvocation *invocation, JSContext *context, JSValueRef*) override + { + bool value; + [invocation getReturnValue:&value]; + return JSValueMakeBoolean([context JSGlobalContextRef], value); + } +}; + +class CallbackResultStruct : public CallbackResult { +public: + CallbackResultStruct(NSInvocation *conversionInvocation, const char* encodedType) + : m_conversionInvocation(conversionInvocation) + , m_buffer(encodedType) + { + } + +private: + JSValueRef get(NSInvocation *invocation, JSContext *context, JSValueRef*) override + { + [invocation getReturnValue:m_buffer]; + + [m_conversionInvocation setArgument:m_buffer atIndex:2]; + [m_conversionInvocation setArgument:&context atIndex:3]; + [m_conversionInvocation invokeWithTarget:[JSValue class]]; + + JSValue *value; + [m_conversionInvocation getReturnValue:&value]; + return valueInternalValue(value); + } + + RetainPtr m_conversionInvocation; + StructBuffer m_buffer; +}; + +class ResultTypeDelegate { +public: + typedef std::unique_ptr ResultType; + + template + static ResultType typeInteger() + { + return std::make_unique>(); + } + + template + static ResultType typeDouble() + { + return std::make_unique>(); + } + + static ResultType typeBool() + { + return std::make_unique(); + } + + static ResultType typeVoid() + { + return std::make_unique(); + } + + static ResultType typeId() + { + return std::make_unique(); + } + + static ResultType typeOfClass(const char*, const char*) + { + return std::make_unique(); + } + + static ResultType typeBlock(const char*, const char*) + { + return std::make_unique(); + } + + static ResultType typeStruct(const char* begin, const char* end) + { + StringRange copy(begin, end); + if (NSInvocation *invocation = typeToValueInvocationFor(copy)) + return std::make_unique(invocation, copy); + return nullptr; + } +}; + +enum CallbackType { + CallbackInitMethod, + CallbackInstanceMethod, + CallbackClassMethod, + CallbackBlock +}; + +namespace JSC { + +class ObjCCallbackFunctionImpl { +public: + ObjCCallbackFunctionImpl(NSInvocation *invocation, CallbackType type, Class instanceClass, std::unique_ptr arguments, std::unique_ptr result) + : m_type(type) + , m_instanceClass(instanceClass) + , m_invocation(invocation) + , m_arguments(WTFMove(arguments)) + , m_result(WTFMove(result)) + { + ASSERT((type != CallbackInstanceMethod && type != CallbackInitMethod) || instanceClass); + } + + void destroy(Heap& heap) + { + // We need to explicitly release the target since we didn't call + // -retainArguments on m_invocation (and we don't want to do so). 
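    // (The matching +1 reference came from objCCallbackFunctionForBlock's [target copy] and
    // objCCallbackFunctionForMethod's [cls retain] when the invocation was originally set up.)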
+ if (m_type == CallbackBlock || m_type == CallbackClassMethod) + heap.releaseSoon(adoptNS([m_invocation.get() target])); + m_instanceClass = nil; + } + + JSValueRef call(JSContext *context, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + + id wrappedBlock() + { + return m_type == CallbackBlock ? [m_invocation target] : nil; + } + + id wrappedConstructor() + { + switch (m_type) { + case CallbackBlock: + return [m_invocation target]; + case CallbackInitMethod: + return m_instanceClass.get(); + default: + return nil; + } + } + + CallbackType type() const { return m_type; } + + bool isConstructible() + { + return !!wrappedBlock() || m_type == CallbackInitMethod; + } + + String name(); + +private: + CallbackType m_type; + RetainPtr m_instanceClass; + RetainPtr m_invocation; + std::unique_ptr m_arguments; + std::unique_ptr m_result; +}; + +static JSValueRef objCCallbackFunctionCallAsFunction(JSContextRef callerContext, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + // Retake the API lock - we need this for a few reasons: + // (1) We don't want to support the C-API's confusing drops-locks-once policy - should only drop locks if we can do so recursively. + // (2) We're calling some JSC internals that require us to be on the 'inside' - e.g. createTypeError. + // (3) We need to be locked (per context would be fine) against conflicting usage of the ObjCCallbackFunction's NSInvocation. + JSC::JSLockHolder locker(toJS(callerContext)); + + ObjCCallbackFunction* callback = static_cast(toJS(function)); + ObjCCallbackFunctionImpl* impl = callback->impl(); + JSContext *context = [JSContext contextWithJSGlobalContextRef:toGlobalRef(callback->globalObject()->globalExec())]; + + if (impl->type() == CallbackInitMethod) { + JSGlobalContextRef contextRef = [context JSGlobalContextRef]; + *exception = toRef(JSC::createTypeError(toJS(contextRef), ASCIILiteral("Cannot call a class constructor without |new|"))); + return JSValueMakeUndefined(contextRef); + } + + CallbackData callbackData; + JSValueRef result; + @autoreleasepool { + [context beginCallbackWithData:&callbackData calleeValue:function thisValue:thisObject argumentCount:argumentCount arguments:arguments]; + result = impl->call(context, thisObject, argumentCount, arguments, exception); + if (context.exception) + *exception = valueInternalValue(context.exception); + [context endCallbackWithData:&callbackData]; + } + return result; +} + +static JSObjectRef objCCallbackFunctionCallAsConstructor(JSContextRef callerContext, JSObjectRef constructor, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + JSC::JSLockHolder locker(toJS(callerContext)); + + ObjCCallbackFunction* callback = static_cast(toJS(constructor)); + ObjCCallbackFunctionImpl* impl = callback->impl(); + JSContext *context = [JSContext contextWithJSGlobalContextRef:toGlobalRef(toJS(callerContext)->lexicalGlobalObject()->globalExec())]; + + CallbackData callbackData; + JSValueRef result; + @autoreleasepool { + [context beginCallbackWithData:&callbackData calleeValue:constructor thisValue:nullptr argumentCount:argumentCount arguments:arguments]; + result = impl->call(context, nullptr, argumentCount, arguments, exception); + if (context.exception) + *exception = valueInternalValue(context.exception); + [context endCallbackWithData:&callbackData]; + } + + JSGlobalContextRef contextRef = [context JSGlobalContextRef]; + if (*exception) + return 
nullptr; + + if (!JSValueIsObject(contextRef, result)) { + *exception = toRef(JSC::createTypeError(toJS(contextRef), ASCIILiteral("Objective-C blocks called as constructors must return an object."))); + return nullptr; + } + return (JSObjectRef)result; +} + +const JSC::ClassInfo ObjCCallbackFunction::s_info = { "CallbackFunction", &Base::s_info, 0, CREATE_METHOD_TABLE(ObjCCallbackFunction) }; + +ObjCCallbackFunction::ObjCCallbackFunction(JSC::VM& vm, JSC::Structure* structure, JSObjectCallAsFunctionCallback functionCallback, JSObjectCallAsConstructorCallback constructCallback, std::unique_ptr impl) + : Base(vm, structure) + , m_functionCallback(functionCallback) + , m_constructCallback(constructCallback) + , m_impl(WTFMove(impl)) +{ +} + +ObjCCallbackFunction* ObjCCallbackFunction::create(JSC::VM& vm, JSC::JSGlobalObject* globalObject, const String& name, std::unique_ptr impl) +{ + Structure* structure = globalObject->objcCallbackFunctionStructure(); + ObjCCallbackFunction* function = new (NotNull, allocateCell(vm.heap)) ObjCCallbackFunction(vm, structure, objCCallbackFunctionCallAsFunction, objCCallbackFunctionCallAsConstructor, WTFMove(impl)); + function->finishCreation(vm, name); + return function; +} + +void ObjCCallbackFunction::destroy(JSCell* cell) +{ + ObjCCallbackFunction& function = *jsCast(cell); + function.impl()->destroy(*Heap::heap(cell)); + function.~ObjCCallbackFunction(); +} + + +CallType ObjCCallbackFunction::getCallData(JSCell*, CallData& callData) +{ + callData.native.function = APICallbackFunction::call; + return CallType::Host; +} + +ConstructType ObjCCallbackFunction::getConstructData(JSCell* cell, ConstructData& constructData) +{ + ObjCCallbackFunction* callback = jsCast(cell); + if (!callback->impl()->isConstructible()) + return Base::getConstructData(cell, constructData); + constructData.native.function = APICallbackFunction::construct; + return ConstructType::Host; +} + +String ObjCCallbackFunctionImpl::name() +{ + if (m_type == CallbackInitMethod) + return class_getName(m_instanceClass.get()); + // FIXME: Maybe we could support having the selector as the name of the non-init + // functions to make it a bit more user-friendly from the JS side? 
+ return ""; +} + +JSValueRef ObjCCallbackFunctionImpl::call(JSContext *context, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + JSGlobalContextRef contextRef = [context JSGlobalContextRef]; + + id target; + size_t firstArgument; + switch (m_type) { + case CallbackInitMethod: { + RELEASE_ASSERT(!thisObject); + target = [m_instanceClass alloc]; + if (!target || ![target isKindOfClass:m_instanceClass.get()]) { + *exception = toRef(JSC::createTypeError(toJS(contextRef), ASCIILiteral("self type check failed for Objective-C instance method"))); + return JSValueMakeUndefined(contextRef); + } + [m_invocation setTarget:target]; + firstArgument = 2; + break; + } + case CallbackInstanceMethod: { + target = tryUnwrapObjcObject(contextRef, thisObject); + if (!target || ![target isKindOfClass:m_instanceClass.get()]) { + *exception = toRef(JSC::createTypeError(toJS(contextRef), ASCIILiteral("self type check failed for Objective-C instance method"))); + return JSValueMakeUndefined(contextRef); + } + [m_invocation setTarget:target]; + firstArgument = 2; + break; + } + case CallbackClassMethod: + firstArgument = 2; + break; + case CallbackBlock: + firstArgument = 1; + } + + size_t argumentNumber = 0; + for (CallbackArgument* argument = m_arguments.get(); argument; argument = argument->m_next.get()) { + JSValueRef value = argumentNumber < argumentCount ? arguments[argumentNumber] : JSValueMakeUndefined(contextRef); + argument->set(m_invocation.get(), argumentNumber + firstArgument, context, value, exception); + if (*exception) + return JSValueMakeUndefined(contextRef); + ++argumentNumber; + } + + [m_invocation invoke]; + + JSValueRef result = m_result->get(m_invocation.get(), context, exception); + + // Balance our call to -alloc with a call to -autorelease. We have to do this after calling -init + // because init family methods are allowed to release the allocated object and return something + // else in its place. + if (m_type == CallbackInitMethod) { + id objcResult = tryUnwrapObjcObject(contextRef, result); + if (objcResult) + [objcResult autorelease]; + } + + return result; +} + +} // namespace JSC + +static bool blockSignatureContainsClass() +{ + static bool containsClass = ^{ + id block = ^(NSString *string){ return string; }; + return _Block_has_signature(block) && strstr(_Block_signature(block), "NSString"); + }(); + return containsClass; +} + +static inline bool skipNumber(const char*& position) +{ + if (!isASCIIDigit(*position)) + return false; + while (isASCIIDigit(*++position)) { } + return true; +} + +static JSObjectRef objCCallbackFunctionForInvocation(JSContext *context, NSInvocation *invocation, CallbackType type, Class instanceClass, const char* signatureWithObjcClasses) +{ + if (!signatureWithObjcClasses) + return nullptr; + + const char* position = signatureWithObjcClasses; + + auto result = parseObjCType(position); + if (!result || !skipNumber(position)) + return nullptr; + + switch (type) { + case CallbackInitMethod: + case CallbackInstanceMethod: + case CallbackClassMethod: + // Methods are passed two implicit arguments - (id)self, and the selector. + if ('@' != *position++ || !skipNumber(position) || ':' != *position++ || !skipNumber(position)) + return nullptr; + break; + case CallbackBlock: + // Blocks are passed one implicit argument - the block, of type "@?". + if (('@' != *position++) || ('?' 
!= *position++) || !skipNumber(position)) + return nullptr; + // Only allow arguments of type 'id' if the block signature contains the NS type information. + if ((!blockSignatureContainsClass() && strchr(position, '@'))) + return nullptr; + break; + } + + std::unique_ptr arguments; + auto* nextArgument = &arguments; + unsigned argumentCount = 0; + while (*position) { + auto argument = parseObjCType(position); + if (!argument || !skipNumber(position)) + return nullptr; + + *nextArgument = WTFMove(argument); + nextArgument = &(*nextArgument)->m_next; + ++argumentCount; + } + + JSC::ExecState* exec = toJS([context JSGlobalContextRef]); + JSC::JSLockHolder locker(exec); + auto impl = std::make_unique(invocation, type, instanceClass, WTFMove(arguments), WTFMove(result)); + const String& name = impl->name(); + return toRef(JSC::ObjCCallbackFunction::create(exec->vm(), exec->lexicalGlobalObject(), name, WTFMove(impl))); +} + +JSObjectRef objCCallbackFunctionForInit(JSContext *context, Class cls, Protocol *protocol, SEL sel, const char* types) +{ + NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:[NSMethodSignature signatureWithObjCTypes:types]]; + [invocation setSelector:sel]; + return objCCallbackFunctionForInvocation(context, invocation, CallbackInitMethod, cls, _protocol_getMethodTypeEncoding(protocol, sel, YES, YES)); +} + +JSObjectRef objCCallbackFunctionForMethod(JSContext *context, Class cls, Protocol *protocol, BOOL isInstanceMethod, SEL sel, const char* types) +{ + NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:[NSMethodSignature signatureWithObjCTypes:types]]; + [invocation setSelector:sel]; + // We need to retain the target Class because m_invocation doesn't retain it by default (and we don't want it to). + // FIXME: What releases it? + if (!isInstanceMethod) + [invocation setTarget:[cls retain]]; + return objCCallbackFunctionForInvocation(context, invocation, isInstanceMethod ? CallbackInstanceMethod : CallbackClassMethod, isInstanceMethod ? cls : nil, _protocol_getMethodTypeEncoding(protocol, sel, YES, isInstanceMethod)); +} + +JSObjectRef objCCallbackFunctionForBlock(JSContext *context, id target) +{ + if (!_Block_has_signature(target)) + return nullptr; + const char* signature = _Block_signature(target); + NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:[NSMethodSignature signatureWithObjCTypes:signature]]; + + // We don't want to use -retainArguments because that leaks memory. Arguments + // would be retained indefinitely between invocations of the callback. + // Additionally, we copy the target because we want the block to stick around + // until the ObjCCallbackFunctionImpl is destroyed. + [invocation setTarget:[target copy]]; + + return objCCallbackFunctionForInvocation(context, invocation, CallbackBlock, nil, signature); +} + +id tryUnwrapConstructor(JSObjectRef object) +{ + if (!toJS(object)->inherits(JSC::ObjCCallbackFunction::info())) + return nil; + JSC::ObjCCallbackFunctionImpl* impl = static_cast(toJS(object))->impl(); + if (!impl->isConstructible()) + return nil; + return impl->wrappedConstructor(); +} + +#endif diff --git a/API/ObjcRuntimeExtras.h b/API/ObjcRuntimeExtras.h new file mode 100644 index 0000000..128df5c --- /dev/null +++ b/API/ObjcRuntimeExtras.h @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import +#import +#import + +inline bool protocolImplementsProtocol(Protocol *candidate, Protocol *target) +{ + unsigned protocolProtocolsCount; + Protocol ** protocolProtocols = protocol_copyProtocolList(candidate, &protocolProtocolsCount); + for (unsigned i = 0; i < protocolProtocolsCount; ++i) { + if (protocol_isEqual(protocolProtocols[i], target)) { + free(protocolProtocols); + return true; + } + } + free(protocolProtocols); + return false; +} + +inline void forEachProtocolImplementingProtocol(Class cls, Protocol *target, void (^callback)(Protocol *)) +{ + ASSERT(cls); + ASSERT(target); + + Vector worklist; + HashSet visited; + + // Initially fill the worklist with the Class's protocols. + unsigned protocolsCount; + Protocol ** protocols = class_copyProtocolList(cls, &protocolsCount); + worklist.append(protocols, protocolsCount); + free(protocols); + + while (!worklist.isEmpty()) { + Protocol *protocol = worklist.last(); + worklist.removeLast(); + + // Are we encountering this Protocol for the first time? + if (!visited.add(protocol).isNewEntry) + continue; + + // If it implements the protocol, make the callback. + if (protocolImplementsProtocol(protocol, target)) + callback(protocol); + + // Add incorporated protocols to the worklist. 
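/*
 * (The copy below re-fills the worklist with the protocols this protocol itself adopts,
 * so the walk covers the whole protocol graph, and the 'visited' set keeps diamond-shaped
 * hierarchies from being processed twice.)
 *
 * Usage sketch, assuming a hypothetical class Foo that adopts protocols derived from
 * JSExport; illustrative only:
 *
 *     forEachProtocolImplementingProtocol([Foo class], @protocol(JSExport), ^(Protocol *p) {
 *         NSLog(@"%s extends JSExport", protocol_getName(p));
 *     });
 */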
+ protocols = protocol_copyProtocolList(protocol, &protocolsCount); + worklist.append(protocols, protocolsCount); + free(protocols); + } +} + +inline void forEachMethodInClass(Class cls, void (^callback)(Method)) +{ + unsigned count; + Method* methods = class_copyMethodList(cls, &count); + for (unsigned i = 0; i < count; ++i) + callback(methods[i]); + free(methods); +} + +inline void forEachMethodInProtocol(Protocol *protocol, BOOL isRequiredMethod, BOOL isInstanceMethod, void (^callback)(SEL, const char*)) +{ + unsigned count; + struct objc_method_description* methods = protocol_copyMethodDescriptionList(protocol, isRequiredMethod, isInstanceMethod, &count); + for (unsigned i = 0; i < count; ++i) + callback(methods[i].name, methods[i].types); + free(methods); +} + +inline void forEachPropertyInProtocol(Protocol *protocol, void (^callback)(objc_property_t)) +{ + unsigned count; + objc_property_t* properties = protocol_copyPropertyList(protocol, &count); + for (unsigned i = 0; i < count; ++i) + callback(properties[i]); + free(properties); +} + +template +void skipPair(const char*& position) +{ + size_t count = 1; + do { + char c = *position++; + if (!c) + @throw [NSException exceptionWithName:NSInternalInconsistencyException reason:@"Malformed type encoding" userInfo:nil]; + if (c == open) + ++count; + else if (c == close) + --count; + } while (count); +} + +class StringRange { + WTF_MAKE_NONCOPYABLE(StringRange); +public: + StringRange(const char* begin, const char* end) : m_ptr(strndup(begin, end - begin)) { } + ~StringRange() { free(m_ptr); } + operator const char*() const { return m_ptr; } + const char* get() const { return m_ptr; } + +private: + char* m_ptr; +}; + +class StructBuffer { + WTF_MAKE_NONCOPYABLE(StructBuffer); +public: + StructBuffer(const char* encodedType) + { + NSUInteger size, alignment; + NSGetSizeAndAlignment(encodedType, &size, &alignment); + --alignment; + m_allocation = static_cast(malloc(size + alignment)); + m_buffer = reinterpret_cast((reinterpret_cast(m_allocation) + alignment) & ~alignment); + } + + ~StructBuffer() { free(m_allocation); } + operator void*() const { return m_buffer; } + +private: + void* m_allocation; + void* m_buffer; +}; + +template +typename DelegateType::ResultType parseObjCType(const char*& position) +{ + ASSERT(*position); + + switch (*position++) { + case 'c': + return DelegateType::template typeInteger(); + case 'i': + return DelegateType::template typeInteger(); + case 's': + return DelegateType::template typeInteger(); + case 'l': + return DelegateType::template typeInteger(); + case 'q': + return DelegateType::template typeDouble(); + case 'C': + return DelegateType::template typeInteger(); + case 'I': + return DelegateType::template typeInteger(); + case 'S': + return DelegateType::template typeInteger(); + case 'L': + return DelegateType::template typeInteger(); + case 'Q': + return DelegateType::template typeDouble(); + case 'f': + return DelegateType::template typeDouble(); + case 'd': + return DelegateType::template typeDouble(); + case 'B': + return DelegateType::typeBool(); + case 'v': + return DelegateType::typeVoid(); + + case '@': { // An object (whether statically typed or typed id) + if (position[0] == '?' 
&& position[1] == '<') { + position += 2; + const char* begin = position; + skipPair<'<','>'>(position); + return DelegateType::typeBlock(begin, position - 1); + } + + if (*position == '"') { + const char* begin = position + 1; + const char* protocolPosition = strchr(begin, '<'); + const char* endOfType = strchr(begin, '"'); + position = endOfType + 1; + + // There's no protocol involved in this type, so just handle the class name. + if (!protocolPosition || protocolPosition > endOfType) + return DelegateType::typeOfClass(begin, endOfType); + // We skipped the class name and went straight to the protocol, so this is an id type. + if (begin == protocolPosition) + return DelegateType::typeId(); + // We have a class name with a protocol. For now, ignore the protocol. + return DelegateType::typeOfClass(begin, protocolPosition); + } + + return DelegateType::typeId(); + } + + case '{': { // {name=type...} A structure + const char* begin = position - 1; + skipPair<'{','}'>(position); + return DelegateType::typeStruct(begin, position); + } + + // NOT supporting C strings, arrays, pointers, unions, bitfields, function pointers. + case '*': // A character string (char *) + case '[': // [array type] An array + case '(': // (name=type...) A union + case 'b': // bnum A bit field of num bits + case '^': // ^type A pointer to type + case '?': // An unknown type (among other things, this code is used for function pointers) + // NOT supporting Objective-C Class, SEL + case '#': // A class object (Class) + case ':': // A method selector (SEL) + default: + return nil; + } +} + +extern "C" { + // Forward declare some Objective-C runtime internal methods that are not API. + const char *_protocol_getMethodTypeEncoding(Protocol *, SEL, BOOL isRequiredMethod, BOOL isInstanceMethod); + id objc_initWeak(id *, id); + void objc_destroyWeak(id *); + bool _Block_has_signature(void *); + const char * _Block_signature(void *); +} diff --git a/API/OpaqueJSString.cpp b/API/OpaqueJSString.cpp new file mode 100644 index 0000000..07a79ad --- /dev/null +++ b/API/OpaqueJSString.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "OpaqueJSString.h" + +#include "CallFrame.h" +#include "Identifier.h" +#include "IdentifierInlines.h" +#include "JSGlobalObject.h" +#include + +using namespace JSC; + +RefPtr OpaqueJSString::create(const String& string) +{ + if (string.isNull()) + return nullptr; + + return adoptRef(new OpaqueJSString(string)); +} + +OpaqueJSString::~OpaqueJSString() +{ + // m_characters is put in a local here to avoid an extra atomic load. + UChar* characters = m_characters; + if (!characters) + return; + + if (!m_string.is8Bit() && m_string.characters16() == characters) + return; + + fastFree(characters); +} + +String OpaqueJSString::string() const +{ + // Return a copy of the wrapped string, because the caller may make it an Identifier. + return m_string.isolatedCopy(); +} + +Identifier OpaqueJSString::identifier(VM* vm) const +{ + if (m_string.isNull()) + return Identifier(); + + if (m_string.isEmpty()) + return Identifier(Identifier::EmptyIdentifier); + + if (m_string.is8Bit()) + return Identifier::fromString(vm, m_string.characters8(), m_string.length()); + + return Identifier::fromString(vm, m_string.characters16(), m_string.length()); +} + +const UChar* OpaqueJSString::characters() +{ + // m_characters is put in a local here to avoid an extra atomic load. + UChar* characters = m_characters; + if (characters) + return characters; + + if (m_string.isNull()) + return nullptr; + + unsigned length = m_string.length(); + UChar* newCharacters = static_cast(fastMalloc(length * sizeof(UChar))); + StringView(m_string).getCharactersWithUpconvert(newCharacters); + + if (!m_characters.compare_exchange_strong(characters, newCharacters)) { + fastFree(newCharacters); + return characters; + } + + return newCharacters; +} + +bool OpaqueJSString::equal(const OpaqueJSString* a, const OpaqueJSString* b) +{ + if (a == b) + return true; + + if (!a || !b) + return false; + + return a->m_string == b->m_string; +} diff --git a/API/OpaqueJSString.h b/API/OpaqueJSString.h new file mode 100644 index 0000000..208131b --- /dev/null +++ b/API/OpaqueJSString.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
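 *
 * A usage sketch for the lazy up-conversion implemented in OpaqueJSString::characters()
 * above (illustrative; the JSStringRef C API calls shown are existing API):
 *
 *     JSStringRef s = JSStringCreateWithUTF8CString("hello"); // ASCII input keeps an 8-bit backing store
 *     const JSChar* u16 = JSStringGetCharactersPtr(s);        // forces characters() to allocate and publish
 *                                                             // a 16-bit copy via compare_exchange_strong
 *     JSStringRelease(s);                                     // destructor frees the up-converted buffer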
+ */ + +#ifndef OpaqueJSString_h +#define OpaqueJSString_h + +#include +#include +#include + +namespace JSC { + class Identifier; + class VM; +} + +struct OpaqueJSString : public ThreadSafeRefCounted { + static Ref create() + { + return adoptRef(*new OpaqueJSString); + } + + static Ref create(const LChar* characters, unsigned length) + { + return adoptRef(*new OpaqueJSString(characters, length)); + } + + static Ref create(const UChar* characters, unsigned length) + { + return adoptRef(*new OpaqueJSString(characters, length)); + } + + JS_EXPORT_PRIVATE static RefPtr create(const String&); + + JS_EXPORT_PRIVATE ~OpaqueJSString(); + + bool is8Bit() { return m_string.is8Bit(); } + const LChar* characters8() { return m_string.characters8(); } + const UChar* characters16() { return m_string.characters16(); } + unsigned length() { return m_string.length(); } + + const UChar* characters(); + + JS_EXPORT_PRIVATE String string() const; + JSC::Identifier identifier(JSC::VM*) const; + + static bool equal(const OpaqueJSString*, const OpaqueJSString*); + +private: + friend class WTF::ThreadSafeRefCounted; + + OpaqueJSString() + : m_characters(nullptr) + { + } + + OpaqueJSString(const String& string) + : m_string(string.isolatedCopy()) + , m_characters(m_string.impl() && m_string.is8Bit() ? nullptr : const_cast(m_string.characters16())) + { + } + + OpaqueJSString(const LChar* characters, unsigned length) + : m_string(characters, length) + , m_characters(nullptr) + { + } + + OpaqueJSString(const UChar* characters, unsigned length) + : m_string(characters, length) + , m_characters(m_string.impl() && m_string.is8Bit() ? nullptr : const_cast(m_string.characters16())) + { + } + + String m_string; + + // This will be initialized on demand when characters() is called if the string needs up-conversion. + std::atomic m_characters; +}; + +#endif diff --git a/API/WebKitAvailability.h b/API/WebKitAvailability.h new file mode 100644 index 0000000..ab53183 --- /dev/null +++ b/API/WebKitAvailability.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2008, 2009, 2010, 2014 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __WebKitAvailability__ +#define __WebKitAvailability__ + +#if defined(__APPLE__) + +#include +#include + +#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100 +/* To support availability macros that mention newer OS X versions when building on older OS X versions, + we provide our own definitions of the underlying macros that the availability macros expand to. We're + free to expand the macros as no-ops since frameworks built on older OS X versions only ship bundled with + an application rather than as part of the system. +*/ + +#ifndef __NSi_10_10 // Building from trunk rather than SDK. +#define __NSi_10_10 introduced=10.0 // Use 10.0 to indicate that everything is available. +#endif + +#ifndef __NSi_10_11 // Building from trunk rather than SDK. +#define __NSi_10_11 introduced=10.0 // Use 10.0 to indicate that everything is available. +#endif + +#ifndef __NSi_10_12 // Building from trunk rather than SDK. +#define __NSi_10_12 introduced=10.0 // Use 10.0 to indicate that everything is available. +#endif + +#ifndef __AVAILABILITY_INTERNAL__MAC_10_9 +#define __AVAILABILITY_INTERNAL__MAC_10_9 +#endif + +#ifndef __AVAILABILITY_INTERNAL__MAC_10_10 +#define __AVAILABILITY_INTERNAL__MAC_10_10 +#endif + +#ifndef AVAILABLE_MAC_OS_X_VERSION_10_9_AND_LATER +#define AVAILABLE_MAC_OS_X_VERSION_10_9_AND_LATER +#endif + +#ifndef AVAILABLE_MAC_OS_X_VERSION_10_10_AND_LATER +#define AVAILABLE_MAC_OS_X_VERSION_10_10_AND_LATER +#endif + +#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED <= 101100 */ + +#if defined(BUILDING_GTK__) +#undef CF_AVAILABLE +#define CF_AVAILABLE(_mac, _ios) +#undef CF_ENUM_AVAILABLE +#define CF_ENUM_AVAILABLE(_mac, _ios) +#endif + +#else +#define CF_AVAILABLE(_mac, _ios) +#define CF_ENUM_AVAILABLE(_mac, _ios) +#endif + +#endif /* __WebKitAvailability__ */ diff --git a/API/tests/CompareAndSwapTest.cpp b/API/tests/CompareAndSwapTest.cpp new file mode 100644 index 0000000..e78086c --- /dev/null +++ b/API/tests/CompareAndSwapTest.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
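 *
 * The test in this file exercises a lock-free test-and-set loop. A minimal sketch of the
 * same pattern written against std::atomic for clarity (illustrative only; the file itself
 * uses WTF::atomicCompareExchangeWeakRelaxed on a plain uint8_t):
 *
 *     static bool testAndSet(std::atomic<uint8_t>& word, uint8_t mask)
 *     {
 *         uint8_t old = word.load(std::memory_order_relaxed);
 *         do {
 *             if (old & mask)
 *                 return true;                            // bit was already set
 *         } while (!word.compare_exchange_weak(old, static_cast<uint8_t>(old | mask),
 *                                              std::memory_order_relaxed));
 *         return false;                                   // we set it
 *     }
 *
 * compare_exchange_weak updates 'old' with the current value on failure, so the loop
 * retries until either the bit is observed set or this thread's update lands.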
+ */ + +#include "config.h" +#include "CompareAndSwapTest.h" + +#include +#include +#include + +class Bitmap { +public: + Bitmap() { clearAll(); } + + inline void clearAll(); + inline bool concurrentTestAndSet(size_t n); + inline size_t numBits() const { return words * wordSize; } + +private: + static const size_t Size = 4096*10; + + static const unsigned wordSize = sizeof(uint8_t) * 8; + static const unsigned words = (Size + wordSize - 1) / wordSize; + static const uint8_t one = 1; + + uint8_t bits[words]; +}; + +inline void Bitmap::clearAll() +{ + memset(&bits, 0, sizeof(bits)); +} + +inline bool Bitmap::concurrentTestAndSet(size_t n) +{ + uint8_t mask = one << (n % wordSize); + size_t index = n / wordSize; + uint8_t* wordPtr = &bits[index]; + uint8_t oldValue; + do { + oldValue = *wordPtr; + if (oldValue & mask) + return true; + } while (!WTF::atomicCompareExchangeWeakRelaxed(wordPtr, oldValue, static_cast(oldValue | mask))); + return false; +} + +struct Data { + Bitmap* bitmap; + int id; + int numThreads; +}; + +static void setBitThreadFunc(void* p) +{ + Data* data = reinterpret_cast(p); + Bitmap* bitmap = data->bitmap; + size_t numBits = bitmap->numBits(); + + // The computed start index here is heuristic that seems to maximize (anecdotally) + // the chance for the CAS issue to manifest. + size_t start = (numBits * (data->numThreads - data->id)) / data->numThreads; + + printf(" started Thread %d\n", data->id); + for (size_t i = start; i < numBits; i++) + while (!bitmap->concurrentTestAndSet(i)) { } + for (size_t i = 0; i < start; i++) + while (!bitmap->concurrentTestAndSet(i)) { } + + printf(" finished Thread %d\n", data->id); +} + +void testCompareAndSwap() +{ + Bitmap bitmap; + const int numThreads = 5; + ThreadIdentifier threadIDs[numThreads]; + Data data[numThreads]; + + WTF::initializeThreading(); + + printf("Starting %d threads for CompareAndSwap test. Test should complete without hanging.\n", numThreads); + for (int i = 0; i < numThreads; i++) { + data[i].bitmap = &bitmap; + data[i].id = i; + data[i].numThreads = numThreads; + std::function threadFunc = std::bind(setBitThreadFunc, &data[i]); + threadIDs[i] = createThread("setBitThreadFunc", threadFunc); + } + + printf("Waiting for %d threads to join\n", numThreads); + for (int i = 0; i < numThreads; i++) + waitForThreadCompletion(threadIDs[i]); + + printf("PASS: CompareAndSwap test completed without a hang\n"); +} diff --git a/API/tests/CompareAndSwapTest.h b/API/tests/CompareAndSwapTest.h new file mode 100644 index 0000000..4a1fc59 --- /dev/null +++ b/API/tests/CompareAndSwapTest.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/* Regression test for webkit.org/b/142513 */ +void testCompareAndSwap(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/CurrentThisInsideBlockGetterTest.h b/API/tests/CurrentThisInsideBlockGetterTest.h new file mode 100644 index 0000000..ab68f80 --- /dev/null +++ b/API/tests/CurrentThisInsideBlockGetterTest.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include + +#if JSC_OBJC_API_ENABLED + +void currentThisInsideBlockGetterTest(); + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/CurrentThisInsideBlockGetterTest.mm b/API/tests/CurrentThisInsideBlockGetterTest.mm new file mode 100644 index 0000000..5ec5420 --- /dev/null +++ b/API/tests/CurrentThisInsideBlockGetterTest.mm @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "CurrentThisInsideBlockGetterTest.h" + +#if JSC_OBJC_API_ENABLED + +#import +#import + +static JSObjectRef CallAsConstructor(JSContextRef ctx, JSObjectRef constructor, size_t, const JSValueRef[], JSValueRef*) +{ + JSObjectRef newObjectRef = NULL; + NSMutableDictionary *constructorPrivateProperties = (__bridge NSMutableDictionary *)(JSObjectGetPrivate(constructor)); + NSDictionary *constructorDescriptor = constructorPrivateProperties[@"constructorDescriptor"]; + newObjectRef = JSObjectMake(ctx, NULL, NULL); + NSDictionary *objectProperties = constructorDescriptor[@"objectProperties"]; + + if (objectProperties) { + JSValue *newObject = [JSValue valueWithJSValueRef:newObjectRef inContext:[JSContext contextWithJSGlobalContextRef:JSContextGetGlobalContext(ctx)]]; + for (NSString *objectProperty in objectProperties) { + [newObject defineProperty:objectProperty descriptor:objectProperties[objectProperty]]; + } + } + + return newObjectRef; +} + +static void ConstructorFinalize(JSObjectRef object) +{ + NSMutableDictionary *privateProperties = (__bridge NSMutableDictionary *)(JSObjectGetPrivate(object)); + CFBridgingRelease((__bridge CFTypeRef)(privateProperties)); + JSObjectSetPrivate(object, NULL); +} + +static JSClassRef ConstructorClass(void) +{ + static JSClassRef constructorClass = NULL; + + if (constructorClass == NULL) { + JSClassDefinition classDefinition = kJSClassDefinitionEmpty; + classDefinition.className = "Constructor"; + classDefinition.callAsConstructor = CallAsConstructor; + classDefinition.finalize = ConstructorFinalize; + constructorClass = JSClassCreate(&classDefinition); + } + + return constructorClass; +} + +@interface JSValue (ConstructorCreation) + ++ (JSValue *)valueWithConstructorDescriptor:(NSDictionary *)constructorDescriptor inContext:(JSContext *)context; + +@end + +@implementation JSValue (ConstructorCreation) + ++ (JSValue *)valueWithConstructorDescriptor:(id)constructorDescriptor inContext:(JSContext *)context +{ + NSMutableDictionary *privateProperties = [@{ @"constructorDescriptor" : constructorDescriptor } mutableCopy]; + JSGlobalContextRef ctx = [context JSGlobalContextRef]; + JSObjectRef constructorRef = JSObjectMake(ctx, ConstructorClass(), (void *)CFBridgingRetain(privateProperties)); + JSValue *constructor = [JSValue valueWithJSValueRef:constructorRef inContext:context]; + return constructor; +} + +@end + +@interface JSContext (ConstructorCreation) + +- (JSValue *)valueWithConstructorDescriptor:(NSDictionary *)constructorDescriptor; + +@end + +@implementation JSContext (ConstructorCreation) + +- (JSValue *)valueWithConstructorDescriptor:(id)constructorDescriptor +{ + return [JSValue valueWithConstructorDescriptor:constructorDescriptor inContext:self]; +} + +@end + +void currentThisInsideBlockGetterTest() +{ + @autoreleasepool { + JSContext *context = [[JSContext alloc] init]; + + JSValue *myConstructor = [context valueWithConstructorDescriptor:@{ + @"objectProperties" : @{ + @"currentThis" : @{ JSPropertyDescriptorGetKey : ^{ return 
JSContext.currentThis; } }, + }, + }]; + + JSValue *myObj1 = [myConstructor constructWithArguments:nil]; + NSLog(@"myObj1.currentThis: %@", myObj1[@"currentThis"]); + JSValue *myObj2 = [myConstructor constructWithArguments:@[ @"bar" ]]; + NSLog(@"myObj2.currentThis: %@", myObj2[@"currentThis"]); + } +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/CustomGlobalObjectClassTest.c b/API/tests/CustomGlobalObjectClassTest.c new file mode 100644 index 0000000..62e6397 --- /dev/null +++ b/API/tests/CustomGlobalObjectClassTest.c @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
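 *
 * One note on the property-name copy performed below: JSStringGetLength() counts UTF-16
 * code units, so sizing the UTF-8 buffer from it only works for ASCII names such as
 * "doSomething". A sketch of the general-purpose idiom (JSStringGetMaximumUTF8CStringSize
 * is existing JavaScriptCore API; 'nameRef' stands for whatever JSStringRef is being
 * converted):
 *
 *     size_t bufferSize = JSStringGetMaximumUTF8CStringSize(nameRef);
 *     char* buffer = (char*)malloc(bufferSize);
 *     JSStringGetUTF8CString(nameRef, buffer, bufferSize);  // writes the NUL terminator
 *     // ... use buffer ...
 *     free(buffer);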
+ */ + +#include "CustomGlobalObjectClassTest.h" + +#include +#include +#include + +extern bool assertTrue(bool value, const char* message); + +static bool executedCallback = false; + +static JSValueRef jsDoSomething(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argc, const JSValueRef args[], JSValueRef* exception) +{ + (void)function; + (void)thisObject; + (void)argc; + (void)args; + (void)exception; + executedCallback = true; + return JSValueMakeNull(ctx); +} + +static JSStaticFunction bridgedFunctions[] = { + {"doSomething", jsDoSomething, kJSPropertyAttributeDontDelete}, + {0, 0, 0}, +}; + +static JSClassRef bridgedObjectClass = NULL; +static JSClassDefinition bridgedClassDef; + +static JSClassRef jsClassRef() +{ + if (!bridgedObjectClass) { + bridgedClassDef = kJSClassDefinitionEmpty; + bridgedClassDef.className = "BridgedObject"; + bridgedClassDef.staticFunctions = bridgedFunctions; + bridgedObjectClass = JSClassCreate(&bridgedClassDef); + } + return bridgedObjectClass; +} + +void customGlobalObjectClassTest() +{ + JSClassRef bridgedObjectJsClassRef = jsClassRef(); + JSGlobalContextRef globalContext = JSGlobalContextCreate(bridgedObjectJsClassRef); + + JSObjectRef globalObj = JSContextGetGlobalObject(globalContext); + + JSPropertyNameArrayRef propertyNames = JSObjectCopyPropertyNames(globalContext, globalObj); + size_t propertyCount = JSPropertyNameArrayGetCount(propertyNames); + assertTrue(propertyCount == 1, "Property count == 1"); + + JSStringRef propertyNameRef = JSPropertyNameArrayGetNameAtIndex(propertyNames, 0); + size_t propertyNameLength = JSStringGetLength(propertyNameRef); + size_t bufferSize = sizeof(char) * (propertyNameLength + 1); + char* buffer = (char*)malloc(bufferSize); + JSStringGetUTF8CString(propertyNameRef, buffer, bufferSize); + buffer[propertyNameLength] = '\0'; + assertTrue(!strncmp(buffer, "doSomething", propertyNameLength), "First property name is doSomething"); + free(buffer); + + bool hasMethod = JSObjectHasProperty(globalContext, globalObj, propertyNameRef); + assertTrue(hasMethod, "Property found by name"); + + JSValueRef doSomethingProperty = + JSObjectGetProperty(globalContext, globalObj, propertyNameRef, NULL); + assertTrue(!JSValueIsUndefined(globalContext, doSomethingProperty), "Property is defined"); + + bool globalObjectClassMatchesClassRef = JSValueIsObjectOfClass(globalContext, globalObj, bridgedObjectJsClassRef); + assertTrue(globalObjectClassMatchesClassRef, "Global object is the right class"); + + JSStringRef script = JSStringCreateWithUTF8CString("doSomething();"); + JSEvaluateScript(globalContext, script, NULL, NULL, 1, NULL); + JSStringRelease(script); + + assertTrue(executedCallback, "Executed custom global object callback"); +} + +void globalObjectSetPrototypeTest() +{ + JSClassDefinition definition = kJSClassDefinitionEmpty; + definition.className = "Global"; + JSClassRef global = JSClassCreate(&definition); + JSGlobalContextRef context = JSGlobalContextCreate(global); + JSObjectRef object = JSContextGetGlobalObject(context); + + JSObjectRef above = JSObjectMake(context, 0, 0); + JSStringRef test = JSStringCreateWithUTF8CString("test"); + JSValueRef value = JSValueMakeString(context, test); + JSObjectSetProperty(context, above, test, value, kJSPropertyAttributeDontEnum, 0); + + JSObjectSetPrototype(context, object, above); + JSStringRef script = JSStringCreateWithUTF8CString("test === \"test\""); + JSValueRef result = JSEvaluateScript(context, script, 0, 0, 0, 0); + + assertTrue(JSValueToBoolean(context, 
result), "test === \"test\""); + + JSStringRelease(test); + JSStringRelease(script); +} + +void globalObjectPrivatePropertyTest() +{ + JSClassDefinition definition = kJSClassDefinitionEmpty; + definition.className = "Global"; + JSClassRef global = JSClassCreate(&definition); + JSGlobalContextRef context = JSGlobalContextCreate(global); + JSObjectRef globalObject = JSContextGetGlobalObject(context); + + JSStringRef privateName = JSStringCreateWithUTF8CString("private"); + JSValueRef privateValue = JSValueMakeString(context, privateName); + assertTrue(JSObjectSetPrivateProperty(context, globalObject, privateName, privateValue), "JSObjectSetPrivateProperty succeeded"); + JSValueRef result = JSObjectGetPrivateProperty(context, globalObject, privateName); + assertTrue(JSValueIsStrictEqual(context, privateValue, result), "privateValue === \"private\""); + + assertTrue(JSObjectDeletePrivateProperty(context, globalObject, privateName), "JSObjectDeletePrivateProperty succeeded"); + result = JSObjectGetPrivateProperty(context, globalObject, privateName); + assertTrue(JSValueIsNull(context, result), "Deleted private property is indeed no longer present"); + + JSStringRelease(privateName); +} diff --git a/API/tests/CustomGlobalObjectClassTest.h b/API/tests/CustomGlobalObjectClassTest.h new file mode 100644 index 0000000..3d2a520 --- /dev/null +++ b/API/tests/CustomGlobalObjectClassTest.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +void customGlobalObjectClassTest(void); +void globalObjectSetPrototypeTest(void); +void globalObjectPrivatePropertyTest(void); diff --git a/API/tests/DateTests.h b/API/tests/DateTests.h new file mode 100644 index 0000000..eeb47a1 --- /dev/null +++ b/API/tests/DateTests.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import + +#if JSC_OBJC_API_ENABLED + +void runDateTests(); + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/DateTests.mm b/API/tests/DateTests.mm new file mode 100644 index 0000000..e2837a6 --- /dev/null +++ b/API/tests/DateTests.mm @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#import "DateTests.h" +#import + +#if JSC_OBJC_API_ENABLED + +extern "C" void checkResult(NSString *description, bool passed); + +@interface DateTests : NSObject ++ (void) NSDateToJSDateTest; ++ (void) JSDateToNSDateTest; ++ (void) roundTripThroughJSDateTest; ++ (void) roundTripThroughObjCDateTest; +@end + +static unsigned unitFlags = NSCalendarUnitSecond | NSCalendarUnitMinute | NSCalendarUnitHour | NSCalendarUnitDay | NSCalendarUnitMonth | NSCalendarUnitYear; + +@implementation DateTests ++ (void) NSDateToJSDateTest +{ + JSContext *context = [[JSContext alloc] init]; + NSDate *now = [NSDate dateWithTimeIntervalSinceNow:0]; + NSDateComponents *components = [[NSCalendar currentCalendar] components:unitFlags fromDate:now]; + JSValue *jsNow = [JSValue valueWithObject:now inContext:context]; + int year = [[jsNow invokeMethod:@"getFullYear" withArguments:@[]] toInt32]; + // Months are 0-indexed for JavaScript Dates. + int month = [[jsNow invokeMethod:@"getMonth" withArguments:@[]] toInt32] + 1; + int day = [[jsNow invokeMethod:@"getDate" withArguments:@[]] toInt32]; + int hour = [[jsNow invokeMethod:@"getHours" withArguments:@[]] toInt32]; + int minute = [[jsNow invokeMethod:@"getMinutes" withArguments:@[]] toInt32]; + int second = [[jsNow invokeMethod:@"getSeconds" withArguments:@[]] toInt32]; + + checkResult(@"NSDate to JS Date", year == [components year] + && month == [components month] + && day == [components day] + && hour == [components hour] + && minute == [components minute] + && second == [components second]); +} + ++ (void) JSDateToNSDateTest +{ + JSContext *context = [[JSContext alloc] init]; + NSDateFormatter *formatter = [[NSDateFormatter alloc] init]; + [formatter setDateFormat:@"MMMM dd',' yyyy hh:mm:ss"]; + NSDate *februaryFourth2014 = [formatter dateFromString:@"February 4, 2014 11:40:03"]; + NSDateComponents *components = [[NSCalendar currentCalendar] components:unitFlags fromDate:februaryFourth2014]; + // Months are 0-indexed for JavaScript Dates. + JSValue *jsDate = [context[@"Date"] constructWithArguments:@[@2014, @1, @4, @11, @40, @3]]; + + int year = [[jsDate invokeMethod:@"getFullYear" withArguments:@[]] toInt32]; + int month = [[jsDate invokeMethod:@"getMonth" withArguments:@[]] toInt32] + 1; + int day = [[jsDate invokeMethod:@"getDate" withArguments:@[]] toInt32]; + int hour = [[jsDate invokeMethod:@"getHours" withArguments:@[]] toInt32]; + int minute = [[jsDate invokeMethod:@"getMinutes" withArguments:@[]] toInt32]; + int second = [[jsDate invokeMethod:@"getSeconds" withArguments:@[]] toInt32]; + + checkResult(@"JS Date to NSDate", year == [components year] + && month == [components month] + && day == [components day] + && hour == [components hour] + && minute == [components minute] + && second == [components second]); +} + ++ (void) roundTripThroughJSDateTest +{ + JSContext *context = [[JSContext alloc] init]; + [context evaluateScript:@"function jsReturnDate(date) { return date; }"]; + NSDateFormatter *formatter = [[NSDateFormatter alloc] init]; + [formatter setDateFormat:@"MMMM dd',' yyyy hh:mm:ss"]; + NSDate *februaryFourth2014 = [formatter dateFromString:@"February 4, 2014 11:40:03"]; + NSDateComponents *components = [[NSCalendar currentCalendar] components:unitFlags fromDate:februaryFourth2014]; + + JSValue *roundTripThroughJS = [context[@"jsReturnDate"] callWithArguments:@[februaryFourth2014]]; + int year = [[roundTripThroughJS invokeMethod:@"getFullYear" withArguments:@[]] toInt32]; + // Months are 0-indexed for JavaScript Dates. 
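/*
 * Concretely, the two APIs used in these tests disagree on month numbering, which is
 * what the "+ 1" adjustments account for:
 *
 *     new Date(2014, 1, 4)      // February 4, 2014 in JavaScript (months run 0-11)
 *     [components month] == 2   // February in NSDateComponents   (months run 1-12)
 */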
+ int month = [[roundTripThroughJS invokeMethod:@"getMonth" withArguments:@[]] toInt32] + 1; + int day = [[roundTripThroughJS invokeMethod:@"getDate" withArguments:@[]] toInt32]; + int hour = [[roundTripThroughJS invokeMethod:@"getHours" withArguments:@[]] toInt32]; + int minute = [[roundTripThroughJS invokeMethod:@"getMinutes" withArguments:@[]] toInt32]; + int second = [[roundTripThroughJS invokeMethod:@"getSeconds" withArguments:@[]] toInt32]; + + checkResult(@"JS date round trip", year == [components year] + && month == [components month] + && day == [components day] + && hour == [components hour] + && minute == [components minute] + && second == [components second]); +} + ++ (void) roundTripThroughObjCDateTest +{ + JSContext *context = [[JSContext alloc] init]; + context[@"objcReturnDate"] = ^(NSDate *date) { + return date; + }; + [context evaluateScript:@"function test() {\ + var date = new Date(2014, 1, 4, 11, 40, 3); \ + var result = objcReturnDate(date); \ + return date.getYear() === result.getYear() \ + && date.getMonth() === result.getMonth() \ + && date.getDate() === result.getDate() \ + && date.getHours() === result.getHours() \ + && date.getMinutes() === result.getMinutes() \ + && date.getSeconds() === result.getSeconds() \ + && date.getMilliseconds() === result.getMilliseconds();\ + }"]; + + checkResult(@"ObjC date round trip", [[context[@"test"] callWithArguments:@[]] toBool]); +} + +@end + +void runDateTests() +{ + @autoreleasepool { + [DateTests NSDateToJSDateTest]; + [DateTests JSDateToNSDateTest]; + [DateTests roundTripThroughJSDateTest]; + [DateTests roundTripThroughObjCDateTest]; + } +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/ExecutionTimeLimitTest.cpp b/API/tests/ExecutionTimeLimitTest.cpp new file mode 100644 index 0000000..d5e5324 --- /dev/null +++ b/API/tests/ExecutionTimeLimitTest.cpp @@ -0,0 +1,374 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ExecutionTimeLimitTest.h" + +#include "InitializeThreading.h" +#include "JSContextRefPrivate.h" +#include "JavaScriptCore.h" +#include "Options.h" +#include +#include +#include + +using namespace std::chrono; +using JSC::Options; + +static JSGlobalContextRef context = nullptr; + +static JSValueRef currentCPUTimeAsJSFunctionCallback(JSContextRef ctx, JSObjectRef functionObject, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(functionObject); + UNUSED_PARAM(thisObject); + UNUSED_PARAM(argumentCount); + UNUSED_PARAM(arguments); + UNUSED_PARAM(exception); + + ASSERT(JSContextGetGlobalContext(ctx) == context); + return JSValueMakeNumber(ctx, currentCPUTime().count() / 1000000.); +} + +bool shouldTerminateCallbackWasCalled = false; +static bool shouldTerminateCallback(JSContextRef, void*) +{ + shouldTerminateCallbackWasCalled = true; + return true; +} + +bool cancelTerminateCallbackWasCalled = false; +static bool cancelTerminateCallback(JSContextRef, void*) +{ + cancelTerminateCallbackWasCalled = true; + return false; +} + +int extendTerminateCallbackCalled = 0; +static bool extendTerminateCallback(JSContextRef ctx, void*) +{ + extendTerminateCallbackCalled++; + if (extendTerminateCallbackCalled == 1) { + JSContextGroupRef contextGroup = JSContextGetGroup(ctx); + JSContextGroupSetExecutionTimeLimit(contextGroup, .200f, extendTerminateCallback, 0); + return false; + } + return true; +} + +struct TierOptions { + const char* tier; + unsigned timeLimitAdjustmentMillis; + const char* optionsStr; +}; + +static void testResetAfterTimeout(bool& failed) +{ + JSValueRef v = nullptr; + JSValueRef exception = nullptr; + const char* reentryScript = "100"; + JSStringRef script = JSStringCreateWithUTF8CString(reentryScript); + v = JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + if (exception) { + printf("FAIL: Watchdog timeout was not reset.\n"); + failed = true; + } else if (!JSValueIsNumber(context, v) || JSValueToNumber(context, v, nullptr) != 100) { + printf("FAIL: Script result is not as expected.\n"); + failed = true; + } +} + +int testExecutionTimeLimit() +{ + static const TierOptions tierOptionsList[] = { + { "LLINT", 0, "--useConcurrentJIT=false --useLLInt=true --useJIT=false" }, + { "Baseline", 0, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=false" }, + { "DFG", 0, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=true --useFTLJIT=false" }, + { "FTL", 200, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=true --useFTLJIT=true" }, + }; + + bool failed = false; + + JSC::initializeThreading(); + Options::initialize(); // Ensure options is initialized first. 
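/*
 * For reference, the embedder-facing shape of the watchdog API these loops exercise
 * (JSContextGroupSetExecutionTimeLimit and JSShouldTerminateCallback come from
 * JSContextRefPrivate.h; the 100 ms budget below is just an example value):
 *
 *     static bool stopRunawayScript(JSContextRef, void*) { return true; } // true => terminate
 *
 *     JSContextGroupRef group = JSContextGetGroup(context);
 *     JSContextGroupSetExecutionTimeLimit(group, 0.1, stopRunawayScript, nullptr);
 *     // A script in this group that runs past the ~100 ms budget now gets an
 *     // uncatchable TerminatedExecutionException; returning false from the callback
 *     // would let it keep running instead.
 */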
+ + for (auto tierOptions : tierOptionsList) { + StringBuilder savedOptionsBuilder; + Options::dumpAllOptionsInALine(savedOptionsBuilder); + + Options::setOptions(tierOptions.optionsStr); + + unsigned tierAdjustmentMillis = tierOptions.timeLimitAdjustmentMillis; + double timeLimit; + + context = JSGlobalContextCreateInGroup(nullptr, nullptr); + + JSContextGroupRef contextGroup = JSContextGetGroup(context); + JSObjectRef globalObject = JSContextGetGlobalObject(context); + ASSERT(JSValueIsObject(context, globalObject)); + + JSValueRef exception = nullptr; + + JSStringRef currentCPUTimeStr = JSStringCreateWithUTF8CString("currentCPUTime"); + JSObjectRef currentCPUTimeFunction = JSObjectMakeFunctionWithCallback(context, currentCPUTimeStr, currentCPUTimeAsJSFunctionCallback); + JSObjectSetProperty(context, globalObject, currentCPUTimeStr, currentCPUTimeFunction, kJSPropertyAttributeNone, nullptr); + JSStringRelease(currentCPUTimeStr); + + /* Test script timeout: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, shouldTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) && shouldTerminateCallbackWasCalled) + printf("PASS: %s script timed out as expected.\n", tierOptions.tier); + else { + if ((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) + printf("FAIL: %s script did not time out as expected.\n", tierOptions.tier); + if (!shouldTerminateCallbackWasCalled) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (!exception) { + printf("FAIL: %s TerminatedExecutionException was not thrown.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test script timeout with tail calls: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, shouldTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("var startTime = currentCPUTime();" + "function recurse(i) {" + "'use strict';" + "if (i % 1000 === 0) {" + "if (currentCPUTime() - startTime >"); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(" ) { return; }"); + scriptBuilder.appendLiteral(" }"); + scriptBuilder.appendLiteral(" return recurse(i + 1); }"); + scriptBuilder.appendLiteral("recurse(0);"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + 
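/*
 * Why the script above opts into 'use strict': proper tail calls are only guaranteed in
 * strict mode, so without it recurse() would overflow the stack instead of spinning until
 * the watchdog fires. Stripped of the time checks, the script has this shape (illustrative):
 *
 *     "use strict";
 *     function recurse(i) { return recurse(i + 1); }   // tail position => the frame is reused
 *     recurse(0);
 */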
if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) && shouldTerminateCallbackWasCalled) + printf("PASS: %s script with infinite tail calls timed out as expected.\n", tierOptions.tier); + else { + if ((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) + printf("FAIL: %s script with infinite tail calls did not time out as expected.\n", tierOptions.tier); + if (!shouldTerminateCallbackWasCalled) + printf("FAIL: %s script with infinite tail calls' timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (!exception) { + printf("FAIL: %s TerminatedExecutionException was not thrown.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test the script timeout's TerminatedExecutionException should NOT be catchable: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, shouldTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); try { while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } catch(e) { } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) || !shouldTerminateCallbackWasCalled) { + if (!((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired))) + printf("FAIL: %s script did not time out as expected.\n", tierOptions.tier); + if (!shouldTerminateCallbackWasCalled) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (exception) + printf("PASS: %s TerminatedExecutionException was not catchable as expected.\n", tierOptions.tier); + else { + printf("FAIL: %s TerminatedExecutionException was caught.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test script timeout with no callback: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, 0, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + shouldTerminateCallbackWasCalled = false; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) && !shouldTerminateCallbackWasCalled) + printf("PASS: %s script timed out as expected when no callback is specified.\n", tierOptions.tier); + else { + if ((endTime - startTime)
>= milliseconds(timeAfterWatchdogShouldHaveFired)) + printf("FAIL: %s script did not time out as expected when no callback is specified.\n", tierOptions.tier); + else + printf("FAIL: %s script called stale callback function.\n", tierOptions.tier); + failed = true; + } + + if (!exception) { + printf("FAIL: %s TerminatedExecutionException was not thrown.\n", tierOptions.tier); + failed = true; + } + + testResetAfterTimeout(failed); + } + + /* Test script timeout cancellation: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, cancelTerminateCallback, 0); + { + unsigned timeAfterWatchdogShouldHaveFired = 300 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(timeAfterWatchdogShouldHaveFired / 1000.0); + scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + cancelTerminateCallbackWasCalled = false; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + + if (((endTime - startTime) >= milliseconds(timeAfterWatchdogShouldHaveFired)) && cancelTerminateCallbackWasCalled && !exception) + printf("PASS: %s script timeout was cancelled as expected.\n", tierOptions.tier); + else { + if (((endTime - startTime) < milliseconds(timeAfterWatchdogShouldHaveFired)) || exception) + printf("FAIL: %s script timeout was not cancelled.\n", tierOptions.tier); + if (!cancelTerminateCallbackWasCalled) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + failed = true; + } + + if (exception) { + printf("FAIL: %s Unexpected TerminatedExecutionException thrown.\n", tierOptions.tier); + failed = true; + } + } + + /* Test script timeout extension: */ + timeLimit = (100 + tierAdjustmentMillis) / 1000.0; + JSContextGroupSetExecutionTimeLimit(contextGroup, timeLimit, extendTerminateCallback, 0); + { + unsigned timeBeforeExtendedDeadline = 250 + tierAdjustmentMillis; + unsigned timeAfterExtendedDeadline = 600 + tierAdjustmentMillis; + unsigned maxBusyLoopTime = 750 + tierAdjustmentMillis; + + StringBuilder scriptBuilder; + scriptBuilder.appendLiteral("function foo() { var startTime = currentCPUTime(); while (true) { for (var i = 0; i < 1000; i++); if (currentCPUTime() - startTime > "); + scriptBuilder.appendNumber(maxBusyLoopTime / 1000.0); // in seconds. 
+ scriptBuilder.appendLiteral(") break; } } foo();"); + + JSStringRef script = JSStringCreateWithUTF8CString(scriptBuilder.toString().utf8().data()); + exception = nullptr; + extendTerminateCallbackCalled = 0; + + auto startTime = currentCPUTime(); + JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + auto endTime = currentCPUTime(); + auto deltaTime = endTime - startTime; + + if ((deltaTime >= milliseconds(timeBeforeExtendedDeadline)) && (deltaTime < milliseconds(timeAfterExtendedDeadline)) && (extendTerminateCallbackCalled == 2) && exception) + printf("PASS: %s script timeout was extended as expected.\n", tierOptions.tier); + else { + if (deltaTime < milliseconds(timeBeforeExtendedDeadline)) + printf("FAIL: %s script timeout was not extended as expected.\n", tierOptions.tier); + else if (deltaTime >= milliseconds(timeAfterExtendedDeadline)) + printf("FAIL: %s script did not timeout.\n", tierOptions.tier); + + if (extendTerminateCallbackCalled < 1) + printf("FAIL: %s script timeout callback was not called.\n", tierOptions.tier); + if (extendTerminateCallbackCalled < 2) + printf("FAIL: %s script timeout callback was not called after timeout extension.\n", tierOptions.tier); + + if (!exception) + printf("FAIL: %s TerminatedExecutionException was not thrown during timeout extension test.\n", tierOptions.tier); + + failed = true; + } + } + + JSGlobalContextRelease(context); + + Options::setOptions(savedOptionsBuilder.toString().ascii().data()); + } + + return failed; +} diff --git a/API/tests/ExecutionTimeLimitTest.h b/API/tests/ExecutionTimeLimitTest.h new file mode 100644 index 0000000..2c937d0 --- /dev/null +++ b/API/tests/ExecutionTimeLimitTest.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/* Returns 1 if failures were encountered. Else, returns 0. */ +int testExecutionTimeLimit(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/FunctionOverridesTest.cpp b/API/tests/FunctionOverridesTest.cpp new file mode 100644 index 0000000..a325f83 --- /dev/null +++ b/API/tests/FunctionOverridesTest.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FunctionOverridesTest.h" + +#include "FunctionOverrides.h" +#include "InitializeThreading.h" +#include "JSContextRefPrivate.h" +#include "JavaScriptCore.h" +#include "Options.h" +#include + +using JSC::Options; + +int testFunctionOverrides() +{ + bool failed = false; + + JSC::initializeThreading(); + Options::initialize(); // Ensure options is initialized first. + + const char* oldFunctionOverrides = Options::functionOverrides(); + + Options::functionOverrides() = "testapi-function-overrides.js"; + JSC::FunctionOverrides::reinstallOverrides(); + + JSGlobalContextRef context = JSGlobalContextCreateInGroup(nullptr, nullptr); + + JSObjectRef globalObject = JSContextGetGlobalObject(context); + ASSERT_UNUSED(globalObject, JSValueIsObject(context, globalObject)); + + const char* scriptString = + "var str = '';" "\n" + "function f1() { /* Original f1 */ }" "\n" + "str += f1 + '\\n';" "\n" + "var f2 = function() {" "\n" + " // Original f2" "\n" + "}" "\n" + "str += f2 + '\\n';" "\n" + "str += (function() { /* Original f3 */ }) + '\\n';" "\n" + "var f4Source = '/* Original f4 */'" "\n" + "var f4 = new Function(f4Source);" "\n" + "str += f4 + '\\n';" "\n" + "\n" + "var expectedStr =" "\n" + "'function f1() { /* Overridden f1 */ }\\n" + "function () { /* Overridden f2 */ }\\n" + "function () { /* Overridden f3 */ }\\n" + "function anonymous() { /* Overridden f4 */ }\\n';" + "var result = (str == expectedStr);" "\n" + "result"; + + JSStringRef script = JSStringCreateWithUTF8CString(scriptString); + JSValueRef exception = nullptr; + JSValueRef resultRef = JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + + if (!JSValueIsBoolean(context, resultRef) || !JSValueToBoolean(context, resultRef)) + failed = true; + + JSGlobalContextRelease(context); + + JSC::Options::functionOverrides() = oldFunctionOverrides; + JSC::FunctionOverrides::reinstallOverrides(); + + printf("%s: function override tests.\n", failed ? 
"FAIL" : "PASS"); + + return failed; +} diff --git a/API/tests/FunctionOverridesTest.h b/API/tests/FunctionOverridesTest.h new file mode 100644 index 0000000..16237e5 --- /dev/null +++ b/API/tests/FunctionOverridesTest.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/* Returns 1 if failures were encountered. Else, returns 0. */ +int testFunctionOverrides(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/GlobalContextWithFinalizerTest.cpp b/API/tests/GlobalContextWithFinalizerTest.cpp new file mode 100644 index 0000000..0486a26 --- /dev/null +++ b/API/tests/GlobalContextWithFinalizerTest.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "GlobalContextWithFinalizerTest.h" + +#include "JavaScriptCore.h" + +static bool failed = true; + +static void finalize(JSObjectRef) +{ + failed = false; +} + +int testGlobalContextWithFinalizer() +{ + JSClassDefinition def = kJSClassDefinitionEmpty; + def.className = "testClass"; + def.finalize = finalize; + JSClassRef classRef = JSClassCreate(&def); + + JSGlobalContextRef ref = JSGlobalContextCreateInGroup(nullptr, classRef); + JSGlobalContextRelease(ref); + JSClassRelease(classRef); + + if (failed) + printf("FAIL: JSGlobalContextRef did not call its JSClassRef finalizer.\n"); + else + printf("PASS: JSGlobalContextRef called its JSClassRef finalizer as expected.\n"); + + return failed; +} diff --git a/API/tests/GlobalContextWithFinalizerTest.h b/API/tests/GlobalContextWithFinalizerTest.h new file mode 100644 index 0000000..1961350 --- /dev/null +++ b/API/tests/GlobalContextWithFinalizerTest.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "JSContextRefPrivate.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Returns 1 if failures were encountered. Else, returns 0. */ +int testGlobalContextWithFinalizer(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/JSExportTests.h b/API/tests/JSExportTests.h new file mode 100644 index 0000000..9d501ee --- /dev/null +++ b/API/tests/JSExportTests.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import + +#if JSC_OBJC_API_ENABLED + +void runJSExportTests(); + +#endif // JSC_OBJC_API_ENABLED + diff --git a/API/tests/JSExportTests.mm b/API/tests/JSExportTests.mm new file mode 100644 index 0000000..9d22e3f --- /dev/null +++ b/API/tests/JSExportTests.mm @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#import "JSExportTests.h" + +#import +#import + +#if JSC_OBJC_API_ENABLED + +extern "C" void checkResult(NSString *description, bool passed); + +@interface JSExportTests : NSObject ++ (void) exportInstanceMethodWithIdProtocolTest; ++ (void) exportInstanceMethodWithClassProtocolTest; ++ (void) exportDynamicallyGeneratedProtocolTest; +@end + +@protocol TruthTeller +- (BOOL) returnTrue; +@end + +@interface TruthTeller : NSObject +@end + +@implementation TruthTeller +- (BOOL) returnTrue +{ + return true; +} +@end + +@protocol ExportMethodWithIdProtocol +- (void) methodWithIdProtocol:(id)object; +@end + +@interface ExportMethodWithIdProtocol : NSObject +@end + +@implementation ExportMethodWithIdProtocol +- (void) methodWithIdProtocol:(id)object +{ + checkResult(@"Exporting a method with id in the type signature", [object returnTrue]); +} +@end + +@protocol ExportMethodWithClassProtocol +- (void) methodWithClassProtocol:(NSObject *)object; +@end + +@interface ExportMethodWithClassProtocol : NSObject +@end + +@implementation ExportMethodWithClassProtocol +- (void) methodWithClassProtocol:(NSObject *)object +{ + checkResult(@"Exporting a method with class in the type signature", [object returnTrue]); +} +@end + +@implementation JSExportTests ++ (void) exportInstanceMethodWithIdProtocolTest +{ + JSContext *context = [[JSContext alloc] init]; + context[@"ExportMethodWithIdProtocol"] = [ExportMethodWithIdProtocol class]; + context[@"makeTestObject"] = ^{ + return [[ExportMethodWithIdProtocol alloc] init]; + }; + context[@"opaqueObject"] = [[TruthTeller alloc] init]; + [context evaluateScript:@"makeTestObject().methodWithIdProtocol(opaqueObject);"]; + checkResult(@"Successfully exported instance method", !context.exception); +} + ++ (void) exportInstanceMethodWithClassProtocolTest +{ + JSContext *context = [[JSContext alloc] init]; + context[@"ExportMethodWithClassProtocol"] = [ExportMethodWithClassProtocol class]; + context[@"makeTestObject"] = ^{ + return [[ExportMethodWithClassProtocol alloc] init]; + }; + context[@"opaqueObject"] = [[TruthTeller alloc] init]; + [context evaluateScript:@"makeTestObject().methodWithClassProtocol(opaqueObject);"]; + checkResult(@"Successfully exported instance method", !context.exception); +} + ++ (void) exportDynamicallyGeneratedProtocolTest +{ + JSContext *context = [[JSContext alloc] init]; + Protocol *dynProtocol = objc_allocateProtocol("NSStringJSExport"); + Protocol *jsExportProtocol = @protocol(JSExport); + protocol_addProtocol(dynProtocol, jsExportProtocol); + Method method = class_getInstanceMethod([NSString class], @selector(boolValue)); + protocol_addMethodDescription(dynProtocol, @selector(boolValue), method_getTypeEncoding(method), YES, YES); + NSLog(@"type encoding = %s", method_getTypeEncoding(method)); + protocol_addMethodDescription(dynProtocol, @selector(boolValue), "B@:", YES, YES); + objc_registerProtocol(dynProtocol); + class_addProtocol([NSString class], dynProtocol); + + context[@"NSString"] = [NSString class]; + context[@"myString"] = @"YES"; + JSValue *value = [context evaluateScript:@"myString.boolValue()"]; + checkResult(@"Dynamically generated JSExport-ed protocols are ignored", [value isUndefined] && !!context.exception); +} +@end + +void runJSExportTests() +{ + @autoreleasepool { + [JSExportTests exportInstanceMethodWithIdProtocolTest]; + [JSExportTests exportInstanceMethodWithClassProtocolTest]; + [JSExportTests exportDynamicallyGeneratedProtocolTest]; + } +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/JSNode.c 
b/API/tests/JSNode.c new file mode 100644 index 0000000..d0a0dc3 --- /dev/null +++ b/API/tests/JSNode.c @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "JSNode.h" +#include "JSNodeList.h" +#include "JSObjectRef.h" +#include "JSStringRef.h" +#include "JSValueRef.h" +#include "Node.h" +#include "NodeList.h" +#include + +static JSValueRef JSNode_appendChild(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(function); + + /* Example of throwing a type error for invalid values */ + if (!JSValueIsObjectOfClass(context, thisObject, JSNode_class(context))) { + JSStringRef message = JSStringCreateWithUTF8CString("TypeError: appendChild can only be called on nodes"); + *exception = JSValueMakeString(context, message); + JSStringRelease(message); + } else if (argumentCount < 1 || !JSValueIsObjectOfClass(context, arguments[0], JSNode_class(context))) { + JSStringRef message = JSStringCreateWithUTF8CString("TypeError: first argument to appendChild must be a node"); + *exception = JSValueMakeString(context, message); + JSStringRelease(message); + } else { + Node* node = JSObjectGetPrivate(thisObject); + Node* child = JSObjectGetPrivate(JSValueToObject(context, arguments[0], NULL)); + + Node_appendChild(node, child); + } + + return JSValueMakeUndefined(context); +} + +static JSValueRef JSNode_removeChild(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(function); + + /* Example of ignoring invalid values */ + if (argumentCount > 0) { + if (JSValueIsObjectOfClass(context, thisObject, JSNode_class(context))) { + if (JSValueIsObjectOfClass(context, arguments[0], JSNode_class(context))) { + Node* node = JSObjectGetPrivate(thisObject); + Node* child = JSObjectGetPrivate(JSValueToObject(context, arguments[0], exception)); + + Node_removeChild(node, child); + } + } + } + + return JSValueMakeUndefined(context); +} + +static JSValueRef JSNode_replaceChild(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], 
JSValueRef* exception) +{ + UNUSED_PARAM(function); + + if (argumentCount > 1) { + if (JSValueIsObjectOfClass(context, thisObject, JSNode_class(context))) { + if (JSValueIsObjectOfClass(context, arguments[0], JSNode_class(context))) { + if (JSValueIsObjectOfClass(context, arguments[1], JSNode_class(context))) { + Node* node = JSObjectGetPrivate(thisObject); + Node* newChild = JSObjectGetPrivate(JSValueToObject(context, arguments[0], exception)); + Node* oldChild = JSObjectGetPrivate(JSValueToObject(context, arguments[1], exception)); + + Node_replaceChild(node, newChild, oldChild); + } + } + } + } + + return JSValueMakeUndefined(context); +} + +static JSStaticFunction JSNode_staticFunctions[] = { + { "appendChild", JSNode_appendChild, kJSPropertyAttributeDontDelete }, + { "removeChild", JSNode_removeChild, kJSPropertyAttributeDontDelete }, + { "replaceChild", JSNode_replaceChild, kJSPropertyAttributeDontDelete }, + { 0, 0, 0 } +}; + +static JSValueRef JSNode_getNodeType(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception) +{ + UNUSED_PARAM(propertyName); + UNUSED_PARAM(exception); + + Node* node = JSObjectGetPrivate(object); + if (node) { + JSStringRef nodeType = JSStringCreateWithUTF8CString(node->nodeType); + JSValueRef value = JSValueMakeString(context, nodeType); + JSStringRelease(nodeType); + return value; + } + + return NULL; +} + +static JSValueRef JSNode_getChildNodes(JSContextRef context, JSObjectRef thisObject, JSStringRef propertyName, JSValueRef* exception) +{ + UNUSED_PARAM(propertyName); + UNUSED_PARAM(exception); + + Node* node = JSObjectGetPrivate(thisObject); + ASSERT(node); + return JSNodeList_new(context, NodeList_new(node)); +} + +static JSValueRef JSNode_getFirstChild(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception) +{ + UNUSED_PARAM(object); + UNUSED_PARAM(propertyName); + UNUSED_PARAM(exception); + + return JSValueMakeUndefined(context); +} + +static JSStaticValue JSNode_staticValues[] = { + { "nodeType", JSNode_getNodeType, NULL, kJSPropertyAttributeDontDelete | kJSPropertyAttributeReadOnly }, + { "childNodes", JSNode_getChildNodes, NULL, kJSPropertyAttributeDontDelete | kJSPropertyAttributeReadOnly }, + { "firstChild", JSNode_getFirstChild, NULL, kJSPropertyAttributeDontDelete | kJSPropertyAttributeReadOnly }, + { 0, 0, 0, 0 } +}; + +static void JSNode_initialize(JSContextRef context, JSObjectRef object) +{ + UNUSED_PARAM(context); + + Node* node = JSObjectGetPrivate(object); + ASSERT(node); + + Node_ref(node); +} + +static void JSNode_finalize(JSObjectRef object) +{ + Node* node = JSObjectGetPrivate(object); + ASSERT(node); + + Node_deref(node); +} + +JSClassRef JSNode_class(JSContextRef context) +{ + UNUSED_PARAM(context); + + static JSClassRef jsClass; + if (!jsClass) { + JSClassDefinition definition = kJSClassDefinitionEmpty; + definition.staticValues = JSNode_staticValues; + definition.staticFunctions = JSNode_staticFunctions; + definition.initialize = JSNode_initialize; + definition.finalize = JSNode_finalize; + + jsClass = JSClassCreate(&definition); + } + return jsClass; +} + +JSObjectRef JSNode_new(JSContextRef context, Node* node) +{ + return JSObjectMake(context, JSNode_class(context), node); +} + +JSObjectRef JSNode_construct(JSContextRef context, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(object); + UNUSED_PARAM(argumentCount); + UNUSED_PARAM(arguments); + UNUSED_PARAM(exception); + + return 
JSNode_new(context, Node_new()); +} diff --git a/API/tests/JSNode.h b/API/tests/JSNode.h new file mode 100644 index 0000000..dc3e1ca --- /dev/null +++ b/API/tests/JSNode.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "JSBase.h" +#include "Node.h" +#include + +extern JSObjectRef JSNode_new(JSContextRef context, Node* node); +extern JSClassRef JSNode_class(JSContextRef context); +extern JSObjectRef JSNode_construct(JSContextRef context, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); diff --git a/API/tests/JSNodeList.c b/API/tests/JSNodeList.c new file mode 100644 index 0000000..f037e09 --- /dev/null +++ b/API/tests/JSNodeList.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include "JSNode.h" +#include "JSNodeList.h" +#include "JSObjectRef.h" +#include "JSValueRef.h" +#include + +static JSValueRef JSNodeList_item(JSContextRef context, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(object); + + if (argumentCount > 0) { + NodeList* nodeList = JSObjectGetPrivate(thisObject); + ASSERT(nodeList); + Node* node = NodeList_item(nodeList, (unsigned)JSValueToNumber(context, arguments[0], exception)); + if (node) + return JSNode_new(context, node); + } + + return JSValueMakeUndefined(context); +} + +static JSStaticFunction JSNodeList_staticFunctions[] = { + { "item", JSNodeList_item, kJSPropertyAttributeDontDelete }, + { 0, 0, 0 } +}; + +static JSValueRef JSNodeList_length(JSContextRef context, JSObjectRef thisObject, JSStringRef propertyName, JSValueRef* exception) +{ + UNUSED_PARAM(propertyName); + UNUSED_PARAM(exception); + + NodeList* nodeList = JSObjectGetPrivate(thisObject); + ASSERT(nodeList); + return JSValueMakeNumber(context, NodeList_length(nodeList)); +} + +static JSStaticValue JSNodeList_staticValues[] = { + { "length", JSNodeList_length, NULL, kJSPropertyAttributeReadOnly | kJSPropertyAttributeDontDelete }, + { 0, 0, 0, 0 } +}; + +static JSValueRef JSNodeList_getProperty(JSContextRef context, JSObjectRef thisObject, JSStringRef propertyName, JSValueRef* exception) +{ + NodeList* nodeList = JSObjectGetPrivate(thisObject); + ASSERT(nodeList); + double index = JSValueToNumber(context, JSValueMakeString(context, propertyName), exception); + unsigned uindex = (unsigned)index; + if (uindex == index) { /* false for NaN */ + Node* node = NodeList_item(nodeList, uindex); + if (node) + return JSNode_new(context, node); + } + + return NULL; +} + +static void JSNodeList_initialize(JSContextRef context, JSObjectRef thisObject) +{ + UNUSED_PARAM(context); + + NodeList* nodeList = JSObjectGetPrivate(thisObject); + ASSERT(nodeList); + + NodeList_ref(nodeList); +} + +static void JSNodeList_finalize(JSObjectRef thisObject) +{ + NodeList* nodeList = JSObjectGetPrivate(thisObject); + ASSERT(nodeList); + + NodeList_deref(nodeList); +} + +static JSClassRef JSNodeList_class(JSContextRef context) +{ + UNUSED_PARAM(context); + + static JSClassRef jsClass; + if (!jsClass) { + JSClassDefinition definition = kJSClassDefinitionEmpty; + definition.staticValues = JSNodeList_staticValues; + definition.staticFunctions = JSNodeList_staticFunctions; + definition.getProperty = JSNodeList_getProperty; + definition.initialize = JSNodeList_initialize; + definition.finalize = JSNodeList_finalize; + + jsClass = JSClassCreate(&definition); + } + + return jsClass; +} + +JSObjectRef JSNodeList_new(JSContextRef context, NodeList* nodeList) +{ + return JSObjectMake(context, JSNodeList_class(context), nodeList); +} diff --git a/API/tests/JSNodeList.h b/API/tests/JSNodeList.h new file mode 100644 index 0000000..c2d2bb9 --- /dev/null +++ b/API/tests/JSNodeList.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "JSBase.h" +#include "NodeList.h" + +extern JSObjectRef JSNodeList_new(JSContextRef, NodeList*); diff --git a/API/tests/JSONParseTest.cpp b/API/tests/JSONParseTest.cpp new file mode 100644 index 0000000..2797c3c --- /dev/null +++ b/API/tests/JSONParseTest.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "JSONParseTest.h" + +#include "JSCInlines.h" +#include "JSGlobalObject.h" +#include "JSONObject.h" +#include "VM.h" +#include + +using namespace JSC; + +int testJSONParse() +{ + bool failed = false; + + RefPtr vm = VM::create(); + + JSLockHolder locker(vm.get()); + JSGlobalObject* globalObject = JSGlobalObject::create(*vm, JSGlobalObject::createStructure(*vm, jsNull())); + + ExecState* exec = globalObject->globalExec(); + JSValue v0 = JSONParse(exec, ""); + JSValue v1 = JSONParse(exec, "#$%^"); + JSValue v2 = JSONParse(exec, String()); + UChar emptyUCharArray[1] = { '\0' }; + JSValue v3 = JSONParse(exec, String(emptyUCharArray, 0)); + JSValue v4; + JSValue v5 = JSONParse(exec, "123"); + + failed = failed || (v0 != v1); + failed = failed || (v1 != v2); + failed = failed || (v2 != v3); + failed = failed || (v3 != v4); + failed = failed || (v4 == v5); + + vm = nullptr; + + if (failed) + printf("FAIL: JSONParse String test.\n"); + else + printf("PASS: JSONParse String test.\n"); + + return failed; +} diff --git a/API/tests/JSONParseTest.h b/API/tests/JSONParseTest.h new file mode 100644 index 0000000..13842f9 --- /dev/null +++ b/API/tests/JSONParseTest.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +int testJSONParse(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/Node.c b/API/tests/Node.c new file mode 100644 index 0000000..db687e9 --- /dev/null +++ b/API/tests/Node.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "Node.h" +#include +#include + +Node* Node_new(void) +{ + Node* node = (Node*)malloc(sizeof(Node)); + node->refCount = 0; + node->nodeType = "Node"; + node->childNodesTail = NULL; + + return node; +} + +void Node_appendChild(Node* node, Node* child) +{ + Node_ref(child); + NodeLink* nodeLink = (NodeLink*)malloc(sizeof(NodeLink)); + nodeLink->node = child; + nodeLink->prev = node->childNodesTail; + node->childNodesTail = nodeLink; +} + +void Node_removeChild(Node* node, Node* child) +{ + /* Linear search from tail -- good enough for our purposes here */ + NodeLink* current; + NodeLink** currentHandle; + for (currentHandle = &node->childNodesTail, current = *currentHandle; current; currentHandle = &current->prev, current = *currentHandle) { + if (current->node == child) { + Node_deref(current->node); + *currentHandle = current->prev; + free(current); + break; + } + } +} + +void Node_replaceChild(Node* node, Node* newChild, Node* oldChild) +{ + /* Linear search from tail -- good enough for our purposes here */ + NodeLink* current; + for (current = node->childNodesTail; current; current = current->prev) { + if (current->node == oldChild) { + Node_deref(current->node); + current->node = newChild; + } + } +} + +void Node_ref(Node* node) +{ + ++node->refCount; +} + +void Node_deref(Node* node) +{ + if (--node->refCount == 0) + free(node); +} diff --git a/API/tests/Node.h b/API/tests/Node.h new file mode 100644 index 0000000..bdb1f2c --- /dev/null +++ b/API/tests/Node.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +typedef struct __Node Node; +typedef struct __NodeLink NodeLink; + +struct __NodeLink { + Node* node; + NodeLink* prev; +}; + +struct __Node { + unsigned refCount; + const char* nodeType; + NodeLink* childNodesTail; +}; + +extern Node* Node_new(void); +extern void Node_ref(Node* node); +extern void Node_deref(Node* node); +extern void Node_appendChild(Node* node, Node* child); +extern void Node_removeChild(Node* node, Node* child); +extern void Node_replaceChild(Node* node, Node* newChild, Node* oldChild); diff --git a/API/tests/NodeList.c b/API/tests/NodeList.c new file mode 100644 index 0000000..69f4cd5 --- /dev/null +++ b/API/tests/NodeList.c @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "NodeList.h" + +#include + +extern NodeList* NodeList_new(Node* parentNode) +{ + Node_ref(parentNode); + + NodeList* nodeList = (NodeList*)malloc(sizeof(NodeList)); + nodeList->parentNode = parentNode; + nodeList->refCount = 0; + return nodeList; +} + +extern unsigned NodeList_length(NodeList* nodeList) +{ + /* Linear count from tail -- good enough for our purposes here */ + unsigned i = 0; + NodeLink* n = nodeList->parentNode->childNodesTail; + while (n) { + n = n->prev; + ++i; + } + + return i; +} + +extern Node* NodeList_item(NodeList* nodeList, unsigned index) +{ + unsigned length = NodeList_length(nodeList); + if (index >= length) + return NULL; + + /* Linear search from tail -- good enough for our purposes here */ + NodeLink* n = nodeList->parentNode->childNodesTail; + unsigned i = 0; + unsigned count = length - 1 - index; + while (i < count) { + ++i; + n = n->prev; + } + return n->node; +} + +extern void NodeList_ref(NodeList* nodeList) +{ + ++nodeList->refCount; +} + +extern void NodeList_deref(NodeList* nodeList) +{ + if (--nodeList->refCount == 0) { + Node_deref(nodeList->parentNode); + free(nodeList); + } +} diff --git a/API/tests/NodeList.h b/API/tests/NodeList.h new file mode 100644 index 0000000..51163c2 --- /dev/null +++ b/API/tests/NodeList.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "Node.h" + +typedef struct { + unsigned refCount; + Node* parentNode; +} NodeList; + +extern NodeList* NodeList_new(Node* parentNode); +extern unsigned NodeList_length(NodeList*); +extern Node* NodeList_item(NodeList*, unsigned); +extern void NodeList_ref(NodeList*); +extern void NodeList_deref(NodeList*); diff --git a/API/tests/PingPongStackOverflowTest.cpp b/API/tests/PingPongStackOverflowTest.cpp new file mode 100644 index 0000000..ef4b914 --- /dev/null +++ b/API/tests/PingPongStackOverflowTest.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PingPongStackOverflowTest.h" + +#include "InitializeThreading.h" +#include "JSContextRefPrivate.h" +#include "JavaScriptCore.h" +#include "Options.h" +#include + +using JSC::Options; + +static JSGlobalContextRef context = nullptr; +static int nativeRecursionCount = 0; + +static bool PingPongStackOverflowObject_hasInstance(JSContextRef context, JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception) +{ + UNUSED_PARAM(context); + UNUSED_PARAM(constructor); + + JSStringRef hasInstanceName = JSStringCreateWithUTF8CString("hasInstance"); + JSValueRef hasInstance = JSObjectGetProperty(context, constructor, hasInstanceName, exception); + JSStringRelease(hasInstanceName); + if (!hasInstance) + return false; + + int countAtEntry = nativeRecursionCount++; + + JSValueRef result = 0; + if (nativeRecursionCount < 100) { + JSObjectRef function = JSValueToObject(context, hasInstance, exception); + result = JSObjectCallAsFunction(context, function, constructor, 1, &possibleValue, exception); + } else { + StringBuilder builder; + builder.appendLiteral("dummy.valueOf([0]"); + for (int i = 1; i < 35000; i++) { + builder.appendLiteral(", ["); + builder.appendNumber(i); + builder.appendLiteral("]"); + } + builder.appendLiteral(");"); + + JSStringRef script = JSStringCreateWithUTF8CString(builder.toString().utf8().data()); + result = JSEvaluateScript(context, script, NULL, NULL, 1, exception); + JSStringRelease(script); + } + + --nativeRecursionCount; + if (nativeRecursionCount != countAtEntry) + printf(" ERROR: PingPongStackOverflow test saw a recursion count mismatch\n"); + + return result && JSValueToBoolean(context, result); +} + +JSClassDefinition PingPongStackOverflowObject_definition = { + 0, + kJSClassAttributeNone, + + "PingPongStackOverflowObject", + NULL, + + NULL, + NULL, + + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + PingPongStackOverflowObject_hasInstance, + NULL, +}; + +static JSClassRef PingPongStackOverflowObject_class(JSContextRef context) +{ + UNUSED_PARAM(context); + + static JSClassRef jsClass; + if (!jsClass) + jsClass = JSClassCreate(&PingPongStackOverflowObject_definition); + + return jsClass; +} + +// This tests tests a stack overflow on VM reentry into a JS function from a native function +// after ping-pong'ing back and forth between JS and native functions multiple times. 
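+// (Concretely, PingPongStackOverflowObject_hasInstance above re-enters JS via JSObjectCallAsFunction; the
+// script's hasInstance override performs another instanceof, which invokes the native callback again, and
+// once nativeRecursionCount reaches 100 the native side evaluates a deeply nested expression to force the overflow.)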
+// This test should not hang or crash. +int testPingPongStackOverflow() +{ + bool failed = false; + + JSC::initializeThreading(); + Options::initialize(); // Ensure options is initialized first. + + auto origSoftReservedZoneSize = Options::softReservedZoneSize(); + auto origReservedZoneSize = Options::reservedZoneSize(); + auto origUseLLInt = Options::useLLInt(); + auto origMaxPerThreadStackUsage = Options::maxPerThreadStackUsage(); + + Options::softReservedZoneSize() = 128 * KB; + Options::reservedZoneSize() = 64 * KB; +#if ENABLE(JIT) + // Normally, we want to disable the LLINT to force the use of JITted code which is necessary for + // reproducing the regression in https://bugs.webkit.org/show_bug.cgi?id=148749. However, we only + // want to do this if the LLINT isn't the only available execution engine. + Options::useLLInt() = false; +#endif + + const char* scriptString = + "var count = 0;" \ + "PingPongStackOverflowObject.hasInstance = function f() {" \ + " return (undefined instanceof PingPongStackOverflowObject);" \ + "};" \ + "PingPongStackOverflowObject.__proto__ = undefined;" \ + "undefined instanceof PingPongStackOverflowObject;"; + + JSValueRef scriptResult = nullptr; + JSValueRef exception = nullptr; + JSStringRef script = JSStringCreateWithUTF8CString(scriptString); + + nativeRecursionCount = 0; + context = JSGlobalContextCreateInGroup(nullptr, nullptr); + + JSObjectRef globalObject = JSContextGetGlobalObject(context); + ASSERT(JSValueIsObject(context, globalObject)); + + JSObjectRef PingPongStackOverflowObject = JSObjectMake(context, PingPongStackOverflowObject_class(context), NULL); + JSStringRef PingPongStackOverflowObjectString = JSStringCreateWithUTF8CString("PingPongStackOverflowObject"); + JSObjectSetProperty(context, globalObject, PingPongStackOverflowObjectString, PingPongStackOverflowObject, kJSPropertyAttributeNone, NULL); + JSStringRelease(PingPongStackOverflowObjectString); + + unsigned stackSize = 32 * KB; + Options::maxPerThreadStackUsage() = stackSize + Options::softReservedZoneSize(); + + exception = nullptr; + scriptResult = JSEvaluateScript(context, script, nullptr, nullptr, 1, &exception); + + if (!exception) { + printf("FAIL: PingPongStackOverflowError not thrown in PingPongStackOverflow test\n"); + failed = true; + } else if (nativeRecursionCount) { + printf("FAIL: Unbalanced native recursion count: %d in PingPongStackOverflow test\n", nativeRecursionCount); + failed = true; + } else { + printf("PASS: PingPongStackOverflow test.\n"); + } + + Options::softReservedZoneSize() = origSoftReservedZoneSize; + Options::reservedZoneSize() = origReservedZoneSize; + Options::useLLInt() = origUseLLInt; + Options::maxPerThreadStackUsage() = origMaxPerThreadStackUsage; + + return failed; +} diff --git a/API/tests/PingPongStackOverflowTest.h b/API/tests/PingPongStackOverflowTest.h new file mode 100644 index 0000000..a204669 --- /dev/null +++ b/API/tests/PingPongStackOverflowTest.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +int testPingPongStackOverflow(); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/Regress141275.h b/API/tests/Regress141275.h new file mode 100644 index 0000000..bf3492a --- /dev/null +++ b/API/tests/Regress141275.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import + +#if JSC_OBJC_API_ENABLED + +void runRegress141275(); + +#endif // JSC_OBJC_API_ENABLED + diff --git a/API/tests/Regress141275.mm b/API/tests/Regress141275.mm new file mode 100644 index 0000000..18e186a --- /dev/null +++ b/API/tests/Regress141275.mm @@ -0,0 +1,388 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import "config.h" +#import "Regress141275.h" + +#import +#import +#import + +#if JSC_OBJC_API_ENABLED + +extern "C" void JSSynchronousGarbageCollectForDebugging(JSContextRef); + +extern int failed; + +static const NSUInteger scriptToEvaluate = 50; + +@interface JSTEvaluator : NSObject +- (instancetype)initWithScript:(NSString*)script; + +- (void)insertSignPostWithCompletion:(void(^)(NSError* error))completionHandler; + +- (void)evaluateScript:(NSString*)script completion:(void(^)(NSError* error))completionHandler; +- (void)evaluateBlock:(void(^)(JSContext* context))evaluationBlock completion:(void(^)(NSError* error))completionHandler; + +- (void)waitForTasksDoneAndReportResults; +@end + + +static const NSString* JSTEvaluatorThreadContextKey = @"JSTEvaluatorThreadContextKey"; + +/* + * A JSTEvaluatorThreadContext is kept in the thread dictionary of threads used by JSEvaluator. + * + * This includes the run loop thread, and any threads used by _jsSourcePerformQueue to execute a task. + */ +@interface JSTEvaluatorThreadContext : NSObject +@property (weak) JSTEvaluator* evaluator; +@property (strong) JSContext* jsContext; +@end + +@implementation JSTEvaluatorThreadContext +@end + + +/*! + * A JSTEvaluatorTask is a single task to be executed. + * + * JSTEvaluator keeps a list of pending tasks. The run loop thread is repsonsible for feeding pending tasks to the _jsSourcePerformQueue, while respecting sign posts. 
+ */ +@interface JSTEvaluatorTask : NSObject + +@property (nonatomic, copy) void (^evaluateBlock)(JSContext* jsContext); +@property (nonatomic, copy) void (^completionHandler)(NSError* error); +@property (nonatomic, copy) NSError* error; + ++ (instancetype)evaluatorTaskWithEvaluateBlock:(void (^)(JSContext*))block completionHandler:(void (^)(NSError* error))completionBlock; + +@end + +@implementation JSTEvaluatorTask + ++ (instancetype)evaluatorTaskWithEvaluateBlock:(void (^)(JSContext*))evaluationBlock completionHandler:(void (^)(NSError* error))completionHandler +{ + JSTEvaluatorTask* task = [self new]; + task.evaluateBlock = evaluationBlock; + task.completionHandler = completionHandler; + return task; +} + +@end + +@implementation JSTEvaluator { + dispatch_queue_t _jsSourcePerformQueue; + dispatch_semaphore_t _allScriptsDone; + CFRunLoopRef _jsThreadRunLoop; + CFRunLoopSourceRef _jsThreadRunLoopSource; + JSContext* _jsContext; + NSMutableArray* __pendingTasks; +} + +- (instancetype)init +{ + self = [super init]; + if (self) { + _jsSourcePerformQueue = dispatch_queue_create("JSTEval", DISPATCH_QUEUE_CONCURRENT); + + _allScriptsDone = dispatch_semaphore_create(0); + + _jsContext = [JSContext new]; + _jsContext.name = @"JSTEval"; + __pendingTasks = [NSMutableArray new]; + + NSThread* jsThread = [[NSThread alloc] initWithTarget:self selector:@selector(_jsThreadMain) object:nil]; + [jsThread setName:@"JSTEval"]; + [jsThread start]; + + } + return self; +} + +- (instancetype)initWithScript:(NSString*)script +{ + self = [self init]; + if (self) { + __block NSError* scriptError = nil; + dispatch_semaphore_t dsema = dispatch_semaphore_create(0); + [self evaluateScript:script + completion:^(NSError* error) { + scriptError = error; + dispatch_semaphore_signal(dsema); + }]; + dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); + } + return self; +} + +- (void)_accessPendingTasksWithBlock:(void(^)(NSMutableArray* pendingTasks))block +{ + @synchronized(self) { + block(__pendingTasks); + if (__pendingTasks.count > 0) { + if (_jsThreadRunLoop && _jsThreadRunLoopSource) { + CFRunLoopSourceSignal(_jsThreadRunLoopSource); + CFRunLoopWakeUp(_jsThreadRunLoop); + } + } + } +} + +- (void)insertSignPostWithCompletion:(void(^)(NSError* error))completionHandler +{ + [self _accessPendingTasksWithBlock:^(NSMutableArray* pendingTasks) { + JSTEvaluatorTask* task = [JSTEvaluatorTask evaluatorTaskWithEvaluateBlock:nil + completionHandler:completionHandler]; + + [pendingTasks addObject:task]; + }]; +} + +- (void)evaluateScript:(NSString*)script completion:(void(^)(NSError* error))completionHandler +{ + [self evaluateBlock:^(JSContext* context) { + [context evaluateScript:script]; + } completion:completionHandler]; +} + +- (void)evaluateBlock:(void(^)(JSContext* context))evaluationBlock completion:(void(^)(NSError* error))completionHandler +{ + NSParameterAssert(evaluationBlock != nil); + [self _accessPendingTasksWithBlock:^(NSMutableArray* pendingTasks) { + JSTEvaluatorTask* task = [JSTEvaluatorTask evaluatorTaskWithEvaluateBlock:evaluationBlock + completionHandler:completionHandler]; + + [pendingTasks addObject:task]; + }]; +} + +- (void)waitForTasksDoneAndReportResults +{ + NSString* passFailString = @"PASSED"; + + if (!dispatch_semaphore_wait(_allScriptsDone, dispatch_time(DISPATCH_TIME_NOW, 30 * NSEC_PER_SEC))) { + int totalScriptsRun = [_jsContext[@"counter"] toInt32]; + + if (totalScriptsRun != scriptToEvaluate) { + passFailString = @"FAILED"; + failed = 1; + } + + NSLog(@" Ran a total of %d scripts: %@", 
totalScriptsRun, passFailString); + } else { + passFailString = @"FAILED"; + failed = 1; + NSLog(@" Error, timeout waiting for all tasks to complete: %@", passFailString); + } +} + +static void __JSTRunLoopSourceScheduleCallBack(void* info, CFRunLoopRef rl, CFStringRef) +{ + @autoreleasepool { + [(__bridge JSTEvaluator*)info _sourceScheduledOnRunLoop:rl]; + } +} + +static void __JSTRunLoopSourcePerformCallBack(void* info ) +{ + @autoreleasepool { + [(__bridge JSTEvaluator*)info _sourcePerform]; + } +} + +static void __JSTRunLoopSourceCancelCallBack(void* info, CFRunLoopRef rl, CFStringRef) +{ + @autoreleasepool { + [(__bridge JSTEvaluator*)info _sourceCanceledOnRunLoop:rl]; + } +} + +- (void)_jsThreadMain +{ + @autoreleasepool { + const CFIndex kRunLoopSourceContextVersion = 0; + CFRunLoopSourceContext sourceContext = { + kRunLoopSourceContextVersion, (__bridge void*)(self), + NULL, NULL, NULL, NULL, NULL, + __JSTRunLoopSourceScheduleCallBack, + __JSTRunLoopSourceCancelCallBack, + __JSTRunLoopSourcePerformCallBack + }; + + @synchronized(self) { + _jsThreadRunLoop = CFRunLoopGetCurrent(); + CFRetain(_jsThreadRunLoop); + + _jsThreadRunLoopSource = CFRunLoopSourceCreate(kCFAllocatorDefault, 0, &sourceContext); + CFRunLoopAddSource(_jsThreadRunLoop, _jsThreadRunLoopSource, kCFRunLoopDefaultMode); + } + + CFRunLoopRun(); + + @synchronized(self) { + NSMutableDictionary* threadDict = [[NSThread currentThread] threadDictionary]; + [threadDict removeObjectForKey:threadDict[JSTEvaluatorThreadContextKey]]; + + CFRelease(_jsThreadRunLoopSource); + _jsThreadRunLoopSource = NULL; + + CFRelease(_jsThreadRunLoop); + _jsThreadRunLoop = NULL; + + __pendingTasks = nil; + } + } +} + +- (void)_sourceScheduledOnRunLoop:(CFRunLoopRef)runLoop +{ + UNUSED_PARAM(runLoop); + assert([[[NSThread currentThread] name] isEqualToString:@"JSTEval"]); + + // Wake up the run loop in case requests were submitted prior to the + // run loop & run loop source getting created. + CFRunLoopSourceSignal(_jsThreadRunLoopSource); + CFRunLoopWakeUp(_jsThreadRunLoop); +} + +- (void)_setupEvaluatorThreadContextIfNeeded +{ + NSMutableDictionary* threadDict = [[NSThread currentThread] threadDictionary]; + JSTEvaluatorThreadContext* context = threadDict[JSTEvaluatorThreadContextKey]; + // The evaluator may be other evualuator, or nil if this thread has not been used before. Eaither way take ownership. + if (context.evaluator != self) { + context = [JSTEvaluatorThreadContext new]; + context.evaluator = self; + threadDict[JSTEvaluatorThreadContextKey] = context; + } +} + +- (void)_callCompletionHandler:(void(^)(NSError* error))completionHandler ifNeededWithError:(NSError*)error +{ + if (completionHandler) { + dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ + completionHandler(error); + }); + } +} + +- (void)_sourcePerform +{ + assert([[[NSThread currentThread] name] isEqualToString:@"JSTEval"]); + + __block NSArray* tasks = nil; + [self _accessPendingTasksWithBlock:^(NSMutableArray* pendingTasks) { + // No signpost, take all tasks. 
+ tasks = [pendingTasks copy]; + [pendingTasks removeAllObjects]; + }]; + + if (tasks.count > 0) { + for (JSTEvaluatorTask* task in tasks) { + dispatch_block_t block = ^{ + NSError* error = nil; + if (task.evaluateBlock) { + [self _setupEvaluatorThreadContextIfNeeded]; + task.evaluateBlock(_jsContext); + if (_jsContext.exception) { + NSLog(@"Did fail on JSContext: %@", _jsContext.name); + NSDictionary* userInfo = @{ NSLocalizedDescriptionKey : [_jsContext.exception[@"message"] toString] }; + error = [NSError errorWithDomain:@"JSTEvaluator" code:1 userInfo:userInfo]; + _jsContext.exception = nil; + } + } + [self _callCompletionHandler:task.completionHandler ifNeededWithError:error]; + }; + + if (task.evaluateBlock) + dispatch_async(_jsSourcePerformQueue, block); + else + dispatch_barrier_async(_jsSourcePerformQueue, block); + } + + dispatch_barrier_sync(_jsSourcePerformQueue, ^{ + if ([_jsContext[@"counter"] toInt32] == scriptToEvaluate) + dispatch_semaphore_signal(_allScriptsDone); + }); + } +} + +- (void)_sourceCanceledOnRunLoop:(CFRunLoopRef)runLoop +{ + UNUSED_PARAM(runLoop); + assert([[[NSThread currentThread] name] isEqualToString:@"JSTEval"]); + + @synchronized(self) { + assert(_jsThreadRunLoop); + assert(_jsThreadRunLoopSource); + + CFRunLoopRemoveSource(_jsThreadRunLoop, _jsThreadRunLoopSource, kCFRunLoopDefaultMode); + CFRunLoopStop(_jsThreadRunLoop); + } +} + +@end + +void runRegress141275() +{ + // Test that we can execute the same script from multiple threads with a shared context. + // See + NSLog(@"TEST: Testing multiple threads executing the same script with a shared context"); + + @autoreleasepool { + JSTEvaluator* evaluator = [[JSTEvaluator alloc] initWithScript:@"this['counter'] = 0;"]; + + void (^showErrorIfNeeded)(NSError* error) = ^(NSError* error) { + if (error) { + dispatch_async(dispatch_get_main_queue(), ^{ + NSLog(@"Error: %@", error); + }); + } + }; + + [evaluator evaluateBlock:^(JSContext* context) { + JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]); + } completion:showErrorIfNeeded]; + + [evaluator evaluateBlock:^(JSContext* context) { + context[@"wait"] = ^{ + [NSThread sleepForTimeInterval:0.01]; + }; + } completion:^(NSError* error) { + if (error) { + dispatch_async(dispatch_get_main_queue(), ^{ + NSLog(@"Error: %@", error); + }); + } + for (unsigned i = 0; i < scriptToEvaluate; i++) + [evaluator evaluateScript:@"this['counter']++; this['wait']();" completion:showErrorIfNeeded]; + }]; + + [evaluator waitForTasksDoneAndReportResults]; + } +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/Regress141809.h b/API/tests/Regress141809.h new file mode 100644 index 0000000..43b099c --- /dev/null +++ b/API/tests/Regress141809.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import +#import + +#if JSC_OBJC_API_ENABLED + +void runRegress141809(); + +#endif // JSC_OBJC_API_ENABLED + diff --git a/API/tests/Regress141809.mm b/API/tests/Regress141809.mm new file mode 100644 index 0000000..16fd373 --- /dev/null +++ b/API/tests/Regress141809.mm @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#import "config.h" +#import "Regress141809.h" + +#import +#import + +#if JSC_OBJC_API_ENABLED + +extern "C" void checkResult(NSString *description, bool passed); +extern "C" void JSSynchronousGarbageCollectForDebugging(JSContextRef); + +@protocol TestClassAExports +@end + +@interface TestClassA : NSObject +@end + +@implementation TestClassA +@end + +@protocol TestClassBExports +- (NSString *)name; +@end + +@interface TestClassB : TestClassA +@end + +@implementation TestClassB +- (NSString *)name +{ + return @"B"; +} +@end + +@protocol TestClassCExports +- (NSString *)name; +@end + +@interface TestClassC : TestClassB +@end + +@implementation TestClassC +- (NSString *)name +{ + return @"C"; +} +@end + +void runRegress141809() +{ + // Test that the ObjC API can correctly re-construct the synthesized + // prototype and constructor of JS exported ObjC classes. 
+ // See + @autoreleasepool { + JSContext *context = [[JSContext alloc] init]; + context[@"print"] = ^(NSString* str) { + NSLog(@"%@", str); + }; + + [context evaluateScript:@"function dumpPrototypes(obj) { \ + var objDepth = 0; \ + var currObj = obj; \ + var objChain = ''; \ + do { \ + var propIndex = 0; \ + var props = ''; \ + Object.getOwnPropertyNames(currObj).forEach(function(val, idx, array) { \ + props += ((propIndex > 0 ? ', ' : '') + val); \ + propIndex++; \ + }); \ + var str = ''; \ + if (!objDepth) \ + str += 'obj '; \ + else { \ + for (i = 0; i < objDepth; i++) \ + str += ' '; \ + str += '--> proto '; \ + } \ + str += currObj; \ + if (props) \ + str += (' with ' + propIndex + ' props: ' + props); \ + print(str); \ + objChain += (str + '\\n'); \ + objDepth++; \ + currObj = Object.getPrototypeOf(currObj); \ + } while (currObj); \ + return { objDepth: objDepth, objChain: objChain }; \ + }"]; + JSValue* dumpPrototypes = context[@"dumpPrototypes"]; + + JSValue* resultBeforeGC = nil; + @autoreleasepool { + TestClassC* obj = [[TestClassC alloc] init]; + resultBeforeGC = [dumpPrototypes callWithArguments:@[obj]]; + } + + JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]); + + @autoreleasepool { + TestClassC* obj = [[TestClassC alloc] init]; + JSValue* resultAfterGC = [dumpPrototypes callWithArguments:@[obj]]; + checkResult(@"object and prototype chain depth is 5 deep", [resultAfterGC[@"objDepth"] toInt32] == 5); + checkResult(@"object and prototype chain depth before and after GC matches", [resultAfterGC[@"objDepth"] toInt32] == [resultBeforeGC[@"objDepth"] toInt32]); + checkResult(@"object and prototype chain before and after GC matches", [[resultAfterGC[@"objChain"] toString] isEqualToString:[resultBeforeGC[@"objChain"] toString]]); + } + } +} + +#endif // JSC_OBJC_API_ENABLED diff --git a/API/tests/TypedArrayCTest.cpp b/API/tests/TypedArrayCTest.cpp new file mode 100644 index 0000000..8ec8cdd --- /dev/null +++ b/API/tests/TypedArrayCTest.cpp @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "TypedArrayCTest.h" + +#include "JavaScriptCore.h" +#include + +extern "C" void JSSynchronousGarbageCollectForDebugging(JSContextRef); + +static void id(void*, void*) { } +static void freePtr(void* ptr, void*) +{ + free(ptr); +} + +static const unsigned numLengths = 3; + +static const unsigned lengths[numLengths] = +{ + 0, + 1, + 10, +}; + +static const unsigned byteSizes[kJSTypedArrayTypeArrayBuffer] = +{ + 1, // kJSTypedArrayTypeInt8Array + 2, // kJSTypedArrayTypeInt16Array + 4, // kJSTypedArrayTypeInt32Array + 1, // kJSTypedArrayTypeUint8Array + 1, // kJSTypedArrayTypeUint8ClampedArray + 2, // kJSTypedArrayTypeUint16Array + 4, // kJSTypedArrayTypeUint32Array + 4, // kJSTypedArrayTypeFloat32Array + 8, // kJSTypedArrayTypeFloat64Array +}; + +static const char* typeToString[kJSTypedArrayTypeArrayBuffer] = +{ + "kJSTypedArrayTypeInt8Array", + "kJSTypedArrayTypeInt16Array", + "kJSTypedArrayTypeInt32Array", + "kJSTypedArrayTypeUint8Array", + "kJSTypedArrayTypeUint8ClampedArray", + "kJSTypedArrayTypeUint16Array", + "kJSTypedArrayTypeUint32Array", + "kJSTypedArrayTypeFloat32Array", + "kJSTypedArrayTypeFloat64Array", +}; + +inline int unexpectedException(const char* name) +{ + fprintf(stderr, "%s FAILED: unexpected exception\n", name); + return 1; +} + +static int assertEqualsAsNumber(JSGlobalContextRef context, JSValueRef value, double expectedValue) +{ + double number = JSValueToNumber(context, value, nullptr); + if (number != expectedValue && !(isnan(number) && isnan(expectedValue))) { + fprintf(stderr, "assertEqualsAsNumber FAILED: %p, %lf\n", value, expectedValue); + return 1; + } + return 0; +} + +static int testAccess(JSGlobalContextRef context, JSObjectRef typedArray, JSTypedArrayType type, unsigned elementLength, void* expectedPtr = nullptr, JSObjectRef expectedBuffer = nullptr, unsigned expectedOffset = 0) +{ + JSValueRef exception = nullptr; + // Test typedArray basic functions. 
+ JSTypedArrayType actualType = JSValueGetTypedArrayType(context, typedArray, &exception); + if (type != actualType || exception) { + fprintf(stderr, "TypedArray type FAILED: %p, got: %s, expected: %s\n", typedArray, typeToString[actualType], typeToString[type]); + return 1; + } + + unsigned length = JSObjectGetTypedArrayLength(context, typedArray, &exception); + if (elementLength != length || exception) { + fprintf(stderr, "TypedArray length FAILED: %p (%s), got: %d, expected: %d\n", typedArray, typeToString[type], length, elementLength); + return 1; + } + + unsigned byteLength = JSObjectGetTypedArrayByteLength(context, typedArray, &exception); + unsigned expectedLength = byteSizes[type] * elementLength; + if (byteLength != expectedLength || exception) { + fprintf(stderr, "TypedArray byteLength FAILED: %p (%s), got: %d, expected: %d\n", typedArray, typeToString[type], byteLength, expectedLength); + return 1; + } + + unsigned offset = JSObjectGetTypedArrayByteOffset(context, typedArray, &exception); + if (expectedOffset != offset || exception) { + fprintf(stderr, "TypedArray byteOffset FAILED: %p (%s), got: %d, expected: %d\n", typedArray, typeToString[type], offset, expectedOffset); + return 1; + } + + void* ptr = JSObjectGetTypedArrayBytesPtr(context, typedArray, &exception); + if (exception) + return unexpectedException("TypedArray get bytes ptr"); + + JSObjectRef buffer = JSObjectGetTypedArrayBuffer(context, typedArray, &exception); + if (exception) + return unexpectedException("TypedArray get buffer"); + + void* bufferPtr = JSObjectGetArrayBufferBytesPtr(context, buffer, &exception); + if (exception) + return unexpectedException("ArrayBuffer get bytes ptr"); + + if (bufferPtr != ptr) { + fprintf(stderr, "FAIL: TypedArray bytes ptr and ArrayBuffer byte ptr were not the same: %p (%s) TypedArray: %p, ArrayBuffer: %p\n", typedArray, typeToString[type], ptr, bufferPtr); + return 1; + } + + if (expectedPtr && ptr != expectedPtr) { + fprintf(stderr, "FAIL: TypedArray bytes ptr and the ptr used to construct the array were not the same: %p (%s) TypedArray: %p, bytes ptr: %p\n", typedArray, typeToString[type], ptr, expectedPtr); + return 1; + } + + if (expectedBuffer && expectedBuffer != buffer) { + fprintf(stderr, "FAIL: TypedArray buffer and the ArrayBuffer buffer used to construct the array were not the same: %p (%s) TypedArray buffer: %p, data: %p\n", typedArray, typeToString[type], buffer, expectedBuffer); + return 1; + } + + return 0; +} + +static int testConstructors(JSGlobalContextRef context, JSTypedArrayType type, unsigned length) +{ + int failed = 0; + JSValueRef exception = nullptr; + JSObjectRef typedArray; + + // Test create with length. + typedArray = JSObjectMakeTypedArray(context, type, length, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length); + + void* ptr = calloc(length, byteSizes[type]); // This is to be freed by data + JSObjectRef data = JSObjectMakeArrayBufferWithBytesNoCopy(context, ptr, length * byteSizes[type], freePtr, nullptr, &exception); + failed = failed || exception; + + // Test create with existing ptr. + typedArray = JSObjectMakeTypedArrayWithBytesNoCopy(context, type, ptr, length * byteSizes[type], id, nullptr, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length, ptr); + + // Test create with existing ArrayBuffer. 
+ typedArray = JSObjectMakeTypedArrayWithArrayBuffer(context, type, data, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length, ptr, data); + + // Test create with existing ArrayBuffer and offset. + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, 0, length, &exception); + failed = failed || exception || testAccess(context, typedArray, type, length, ptr, data); + + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], length-1, &exception); + if (!length) + failed = failed || !exception; + else + failed = failed || testAccess(context, typedArray, type, length-1, ptr, data, byteSizes[type]) || exception; + + exception = nullptr; + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], 3, &exception); + if (length < 2) + failed = failed || !exception; + else + failed = failed || testAccess(context, typedArray, type, 3, ptr, data, byteSizes[type]) || exception; + + if (byteSizes[type] > 1) { + exception = nullptr; + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, 1, length-1, &exception); + failed = failed || !exception; + } + + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], length, &exception); + failed = failed || !exception; + + exception = nullptr; + typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, type, data, byteSizes[type], 0, &exception); + if (!length) + failed = failed || !exception; + else + failed = failed || testAccess(context, typedArray, type, 0, ptr, data, byteSizes[type]) || exception; + + return failed; +} + +template +static int forEachTypedArrayType(const Functor& functor) +{ + int failed = 0; + for (unsigned i = 0; i < kJSTypedArrayTypeArrayBuffer; i++) + failed = failed || functor(static_cast(i)); + return failed; +} + +int testTypedArrayCAPI() +{ + int failed = 0; + JSGlobalContextRef context = JSGlobalContextCreate(nullptr); + + failed = failed || forEachTypedArrayType([&](JSTypedArrayType type) { + int failed = 0; + for (unsigned i = 0; i < numLengths; i++) + failed = failed || testConstructors(context, type, lengths[i]); + return failed; + }); + + // Test making a typedArray from scratch length. + volatile JSObjectRef typedArray = JSObjectMakeTypedArray(context, kJSTypedArrayTypeUint32Array, 10, nullptr); + JSObjectRef data = JSObjectGetTypedArrayBuffer(context, typedArray, nullptr); + unsigned* buffer = static_cast(JSObjectGetArrayBufferBytesPtr(context, data, nullptr)); + + ASSERT(JSObjectGetTypedArrayLength(context, typedArray, nullptr) == 10); + + // Test buffer is connected to typedArray. + buffer[1] = 1; + JSValueRef v = JSObjectGetPropertyAtIndex(context, typedArray, 1, nullptr); + failed = failed || assertEqualsAsNumber(context, v, 1); + + // Test passing a buffer from a new array to an old array + typedArray = JSObjectMakeTypedArrayWithBytesNoCopy(context, kJSTypedArrayTypeUint32Array, buffer, 40, id, nullptr, nullptr); + buffer = static_cast(JSObjectGetTypedArrayBytesPtr(context, typedArray, nullptr)); + ASSERT(buffer[1] == 1); + buffer[1] = 20; + ASSERT(((unsigned*)JSObjectGetArrayBufferBytesPtr(context, data, nullptr))[1] == 20); + + // Test constructing with data and the data returned are the same even with an offset. 
+ typedArray = JSObjectMakeTypedArrayWithArrayBufferAndOffset(context, kJSTypedArrayTypeUint32Array, data, 4, 9, nullptr); + failed = failed || assertEqualsAsNumber(context, JSObjectGetPropertyAtIndex(context, typedArray, 0, nullptr), 20); + ASSERT(data == JSObjectGetTypedArrayBuffer(context, typedArray, nullptr)); + + // Test attempting to allocate an array too big for memory. + forEachTypedArrayType([&](JSTypedArrayType type) { + JSValueRef exception = nullptr; + JSObjectMakeTypedArray(context, type, UINT_MAX, &exception); + return !exception; + }); + + JSGlobalContextRelease(context); + + if (!failed) + printf("PASS: Typed Array C API Tests.\n"); + else + printf("FAIL: Some Typed Array C API Tests failed.\n"); + + return failed; +} diff --git a/API/tests/TypedArrayCTest.h b/API/tests/TypedArrayCTest.h new file mode 100644 index 0000000..dc66881 --- /dev/null +++ b/API/tests/TypedArrayCTest.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +int testTypedArrayCAPI(void); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/API/tests/minidom.c b/API/tests/minidom.c new file mode 100644 index 0000000..02b41a9 --- /dev/null +++ b/API/tests/minidom.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * Copyright (C) 2007 Alp Toker + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "JSContextRef.h" +#include "JSNode.h" +#include "JSObjectRef.h" +#include "JSStringRef.h" +#include +#include +#include + +static char* createStringWithContentsOfFile(const char* fileName); +static JSValueRef print(JSContextRef context, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception); + +int main(int argc, char* argv[]) +{ + const char *scriptPath = "minidom.js"; + if (argc > 1) { + scriptPath = argv[1]; + } + + JSGlobalContextRef context = JSGlobalContextCreateInGroup(NULL, NULL); + JSObjectRef globalObject = JSContextGetGlobalObject(context); + + JSStringRef printIString = JSStringCreateWithUTF8CString("print"); + JSObjectSetProperty(context, globalObject, printIString, JSObjectMakeFunctionWithCallback(context, printIString, print), kJSPropertyAttributeNone, NULL); + JSStringRelease(printIString); + + JSStringRef node = JSStringCreateWithUTF8CString("Node"); + JSObjectSetProperty(context, globalObject, node, JSObjectMakeConstructor(context, JSNode_class(context), JSNode_construct), kJSPropertyAttributeNone, NULL); + JSStringRelease(node); + + char* scriptUTF8 = createStringWithContentsOfFile(scriptPath); + JSStringRef script = JSStringCreateWithUTF8CString(scriptUTF8); + JSValueRef exception; + JSValueRef result = JSEvaluateScript(context, script, NULL, NULL, 1, &exception); + if (result) + printf("PASS: Test script executed successfully.\n"); + else { + printf("FAIL: Test script threw exception:\n"); + JSStringRef exceptionIString = JSValueToStringCopy(context, exception, NULL); + size_t exceptionUTF8Size = JSStringGetMaximumUTF8CStringSize(exceptionIString); + char* exceptionUTF8 = (char*)malloc(exceptionUTF8Size); + JSStringGetUTF8CString(exceptionIString, exceptionUTF8, exceptionUTF8Size); + printf("%s\n", exceptionUTF8); + free(exceptionUTF8); + JSStringRelease(exceptionIString); + } + JSStringRelease(script); + free(scriptUTF8); + + globalObject = 0; + JSGlobalContextRelease(context); + printf("PASS: Program exited normally.\n"); + return 0; +} + +static JSValueRef print(JSContextRef context, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) +{ + UNUSED_PARAM(object); + UNUSED_PARAM(thisObject); + + if (argumentCount > 0) { + JSStringRef string = JSValueToStringCopy(context, arguments[0], exception); + size_t numChars = JSStringGetMaximumUTF8CStringSize(string); + char stringUTF8[numChars]; + JSStringGetUTF8CString(string, stringUTF8, numChars); + printf("%s\n", stringUTF8); + } + + return JSValueMakeUndefined(context); +} + +static char* createStringWithContentsOfFile(const char* fileName) +{ + char* buffer; + + size_t buffer_size = 0; + size_t buffer_capacity = 1024; + buffer = (char*)malloc(buffer_capacity); + + FILE* f = fopen(fileName, "r"); + if (!f) { + fprintf(stderr, "Could not open file: %s\n", fileName); + free(buffer); + return 0; + } + + while (!feof(f) && !ferror(f)) { + buffer_size += fread(buffer + 
buffer_size, 1, buffer_capacity - buffer_size, f); + if (buffer_size == buffer_capacity) { /* guarantees space for trailing '\0' */ + buffer_capacity *= 2; + buffer = (char*)realloc(buffer, buffer_capacity); + ASSERT(buffer); + } + + ASSERT(buffer_size < buffer_capacity); + } + fclose(f); + buffer[buffer_size] = '\0'; + + return buffer; +} diff --git a/API/tests/minidom.html b/API/tests/minidom.html new file mode 100644 index 0000000..7ea4747 --- /dev/null +++ b/API/tests/minidom.html @@ -0,0 +1,9 @@ + + + + + + +

+
+
diff --git a/API/tests/minidom.js b/API/tests/minidom.js
new file mode 100644
index 0000000..85134d7
--- /dev/null
+++ b/API/tests/minidom.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2006 Apple Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+function shouldBe(a, b)
+{
+    var evalA;
+    try {
+        evalA = eval(a);
+    } catch(e) {
+        evalA = e;
+    }
+    
+    if (evalA == b || isNaN(evalA) && typeof evalA == 'number' && isNaN(b) && typeof b == 'number')
+        print("PASS: " + a + " should be " + b + " and is.", "green");
+    else
+        print("__FAIL__: " + a + " should be " + b + " but instead is " + evalA + ".", "red");
+}
+
+function test()
+{
+    print("Node is " + Node);
+    for (var p in Node)
+        print(p + ": " + Node[p]);
+    
+    node = new Node();
+    print("node is " + node);
+    for (var p in node)
+        print(p + ": " + node[p]);
+
+    child1 = new Node();
+    child2 = new Node();
+    child3 = new Node();
+    
+    node.appendChild(child1);
+    node.appendChild(child2);
+
+    var childNodes = node.childNodes;
+    
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print("item " + i + ": " + childNodes.item(i));
+    }
+    
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print(i + ": " + childNodes[i]);
+    }
+
+    node.removeChild(child1);
+    node.replaceChild(child3, child2);
+    
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print("item " + i + ": " + childNodes.item(i));
+    }
+
+    for (var i = 0; i < childNodes.length + 1; i++) {
+        print(i + ": " + childNodes[i]);
+    }
+
+    try {
+        node.appendChild(null);
+    } catch(e) {
+        print("caught: " + e);
+    }
+    
+    try {
+        var o = new Object();
+        o.appendChild = node.appendChild;
+        o.appendChild(node);
+    } catch(e) {
+        print("caught: " + e);
+    }
+    
+    try {
+        node.appendChild();
+    } catch(e) {
+        print("caught: " + e);
+    }
+    
+    oldNodeType = node.nodeType;
+    node.nodeType = 1;
+    shouldBe("node.nodeType", oldNodeType);
+    
+    shouldBe("node instanceof Node", true);
+    shouldBe("new Object() instanceof Node", false);
+    
+    print(Node);
+}
+
+test();
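
The minidom.js script above exercises the native Node and NodeList classes added earlier in this patch: `childNodes.length` and `childNodes.item()` ultimately reach `NodeList_length()` and `NodeList_item()` from NodeList.c through a JSC C API wrapper class that is not reproduced in this excerpt. Below is a minimal sketch (not the actual WebKit binding) of what such a binding could look like, assuming the wrapper stores its `NodeList*` as the object's private data and that a `JSNode_new()` helper exists to wrap a returned `Node*`.

/* Sketch only: the real JSNode/JSNodeList wrappers live in separate files in this
 * directory and are not part of this excerpt. Names prefixed NodeListSketch_ and the
 * JSNode_new() helper are illustrative assumptions, not the actual WebKit identifiers. */
#include "NodeList.h"
#include "JSNode.h"
#include "JSObjectRef.h"
#include "JSValueRef.h"

static JSValueRef NodeListSketch_getLength(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
{
    (void)propertyName;
    (void)exception;
    NodeList* impl = (NodeList*)JSObjectGetPrivate(object); /* stored when the wrapper was made */
    return JSValueMakeNumber(ctx, (double)NodeList_length(impl));
}

static JSValueRef NodeListSketch_item(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
{
    (void)function;
    NodeList* impl = (NodeList*)JSObjectGetPrivate(thisObject);
    if (!argumentCount)
        return JSValueMakeUndefined(ctx);
    unsigned index = (unsigned)JSValueToNumber(ctx, arguments[0], exception);
    Node* node = NodeList_item(impl, index);
    return node ? JSNode_new(ctx, node) /* assumed helper that wraps a Node* */ : JSValueMakeUndefined(ctx);
}

/* Exposed to scripts as a read-only "length" property and an "item" function. */
static JSStaticValue NodeListSketch_staticValues[] = {
    { "length", NodeListSketch_getLength, NULL, kJSPropertyAttributeReadOnly | kJSPropertyAttributeDontDelete },
    { 0, 0, 0, 0 }
};

static JSStaticFunction NodeListSketch_staticFunctions[] = {
    { "item", NodeListSketch_item, kJSPropertyAttributeDontDelete },
    { 0, 0, 0 }
};

A full binding would also balance `NodeList_ref()`/`NodeList_deref()` in the class's initialize and finalize callbacks so the underlying list stays alive as long as its JS wrapper does.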
diff --git a/API/tests/testapi-function-overrides.js b/API/tests/testapi-function-overrides.js
new file mode 100644
index 0000000..363cced
--- /dev/null
+++ b/API/tests/testapi-function-overrides.js
@@ -0,0 +1,16 @@
+// testapi function overrides for testing.
+override %%%{ /* Original f1 */ }%%%
+with %%%{ /* Overridden f1 */ }%%%
+
+override #$%{
+    // Original f2
+}#$%
+with $$${ /* Overridden f2 */ }$$$
+
+override %%%{ /* Original f3 */ }%%%
+with %%%{ /* Overridden f3 */ }%%%
+
+override %%%{
+/* Original f4 */
+}%%%
+with %%%{ /* Overridden f4 */ }%%%
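
In the overrides file above, each `override` entry gives a function body to match and each `with` entry gives its replacement; the `%%%`, `#$%`, and `$$$` tokens are arbitrary sentinels that only need to match between the opening and closing braces of the same entry. The file is consumed by FunctionOverridesTest, which is included from testapi.c below but not reproduced in this excerpt. The following is a hedged sketch of how a harness could observe the substitution through the C API, assuming the override mechanism is already active for the context and that it rewrites the source later reported by Function.prototype.toString().

/* Sketch only: assumes the function-overrides option has already been applied to this
 * context by the harness (FunctionOverridesTest, not shown in this excerpt). */
#include <stdlib.h>
#include <string.h>
#include "JavaScriptCore.h"

static int checkOverrideApplied(JSGlobalContextRef ctx)
{
    /* Define a function whose body matches the "Original f1" override entry,
     * then inspect the source that toString() reports for it. */
    JSStringRef script = JSStringCreateWithUTF8CString(
        "(function() { /* Original f1 */ }).toString();");
    JSValueRef exception = NULL;
    JSValueRef result = JSEvaluateScript(ctx, script, NULL, NULL, 1, &exception);
    JSStringRelease(script);
    if (!result || exception)
        return 1; /* evaluation failed */

    JSStringRef source = JSValueToStringCopy(ctx, result, NULL);
    size_t size = JSStringGetMaximumUTF8CStringSize(source);
    char* buffer = (char*)malloc(size);
    JSStringGetUTF8CString(source, buffer, size);
    JSStringRelease(source);

    /* Expect the replacement body if the override took effect. */
    int failed = !strstr(buffer, "Overridden f1");
    free(buffer);
    return failed;
}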
diff --git a/API/tests/testapi.c b/API/tests/testapi.c
new file mode 100644
index 0000000..b6c2451
--- /dev/null
+++ b/API/tests/testapi.c
@@ -0,0 +1,1989 @@
+/*
+ * Copyright (C) 2006, 2015-2016 Apple Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+
+#include "JavaScriptCore.h"
+#include "JSBasePrivate.h"
+#include "JSContextRefPrivate.h"
+#include "JSObjectRefPrivate.h"
+#include "JSScriptRefPrivate.h"
+#include "JSStringRefPrivate.h"
+#include <math.h>
+#define ASSERT_DISABLED 0
+#include <wtf/Assertions.h>
+
+#if OS(WINDOWS)
+#include <windows.h>
+#endif
+
+#include "CompareAndSwapTest.h"
+#include "CustomGlobalObjectClassTest.h"
+#include "ExecutionTimeLimitTest.h"
+#include "FunctionOverridesTest.h"
+#include "GlobalContextWithFinalizerTest.h"
+#include "JSONParseTest.h"
+#include "PingPongStackOverflowTest.h"
+#include "TypedArrayCTest.h"
+
+#if JSC_OBJC_API_ENABLED
+void testObjectiveCAPI(void);
+#endif
+
+bool assertTrue(bool value, const char* message);
+
+static JSGlobalContextRef context;
+int failed;
+static void assertEqualsAsBoolean(JSValueRef value, bool expectedValue)
+{
+    if (JSValueToBoolean(context, value) != expectedValue) {
+        fprintf(stderr, "assertEqualsAsBoolean failed: %p, %d\n", value, expectedValue);
+        failed = 1;
+    }
+}
+
+static void assertEqualsAsNumber(JSValueRef value, double expectedValue)
+{
+    double number = JSValueToNumber(context, value, NULL);
+
+    // FIXME  - On i386 the isnan(double) macro tries to map to the isnan(float) function,
+    // causing a build break with -Wshorten-64-to-32 enabled.  The issue is known by the appropriate team.
+    // After that's resolved, we can remove these casts
+    if (number != expectedValue && !(isnan((float)number) && isnan((float)expectedValue))) {
+        fprintf(stderr, "assertEqualsAsNumber failed: %p, %lf\n", value, expectedValue);
+        failed = 1;
+    }
+}
+
+static void assertEqualsAsUTF8String(JSValueRef value, const char* expectedValue)
+{
+    JSStringRef valueAsString = JSValueToStringCopy(context, value, NULL);
+
+    size_t jsSize = JSStringGetMaximumUTF8CStringSize(valueAsString);
+    char* jsBuffer = (char*)malloc(jsSize);
+    JSStringGetUTF8CString(valueAsString, jsBuffer, jsSize);
+
+    unsigned i;
+    for (i = 0; jsBuffer[i]; i++) {
+        if (jsBuffer[i] != expectedValue[i]) {
+            fprintf(stderr, "assertEqualsAsUTF8String failed at character %d: %c(%d) != %c(%d)\n", i, jsBuffer[i], jsBuffer[i], expectedValue[i], expectedValue[i]);
+            fprintf(stderr, "value: %s\n", jsBuffer);
+            fprintf(stderr, "expectedValue: %s\n", expectedValue);
+            failed = 1;
+        }
+    }
+
+    if (jsSize < strlen(jsBuffer) + 1) {
+        fprintf(stderr, "assertEqualsAsUTF8String failed: jsSize was too small\n");
+        failed = 1;
+    }
+
+    free(jsBuffer);
+    JSStringRelease(valueAsString);
+}
+
+static void assertEqualsAsCharactersPtr(JSValueRef value, const char* expectedValue)
+{
+    JSStringRef valueAsString = JSValueToStringCopy(context, value, NULL);
+
+    size_t jsLength = JSStringGetLength(valueAsString);
+    const JSChar* jsBuffer = JSStringGetCharactersPtr(valueAsString);
+
+    CFStringRef expectedValueAsCFString = CFStringCreateWithCString(kCFAllocatorDefault, 
+                                                                    expectedValue,
+                                                                    kCFStringEncodingUTF8);    
+    CFIndex cfLength = CFStringGetLength(expectedValueAsCFString);
+    UniChar* cfBuffer = (UniChar*)malloc(cfLength * sizeof(UniChar));
+    CFStringGetCharacters(expectedValueAsCFString, CFRangeMake(0, cfLength), cfBuffer);
+    CFRelease(expectedValueAsCFString);
+
+    if (memcmp(jsBuffer, cfBuffer, cfLength * sizeof(UniChar)) != 0) {
+        fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsBuffer != cfBuffer\n");
+        failed = 1;
+    }
+    
+    if (jsLength != (size_t)cfLength) {
+#if OS(WINDOWS)
+        fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsLength(%Iu) != cfLength(%Iu)\n", jsLength, (size_t)cfLength);
+#else
+        fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsLength(%zu) != cfLength(%zu)\n", jsLength, (size_t)cfLength);
+#endif
+        failed = 1;
+    }
+
+    free(cfBuffer);
+    JSStringRelease(valueAsString);
+}
+
+static bool timeZoneIsPST()
+{
+    char timeZoneName[70];
+    struct tm gtm;
+    memset(&gtm, 0, sizeof(gtm));
+    strftime(timeZoneName, sizeof(timeZoneName), "%Z", &gtm);
+
+    return 0 == strcmp("PST", timeZoneName);
+}
+
+static JSValueRef jsGlobalValue; // non-stack value for testing JSValueProtect()
+
+/* MyObject pseudo-class */
+
+static bool MyObject_hasProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "alwaysOne")
+        || JSStringIsEqualToUTF8CString(propertyName, "cantFind")
+        || JSStringIsEqualToUTF8CString(propertyName, "throwOnGet")
+        || JSStringIsEqualToUTF8CString(propertyName, "myPropertyName")
+        || JSStringIsEqualToUTF8CString(propertyName, "hasPropertyLie")
+        || JSStringIsEqualToUTF8CString(propertyName, "0")) {
+        return true;
+    }
+    
+    return false;
+}
+
+static JSValueRef MyObject_getProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "alwaysOne")) {
+        return JSValueMakeNumber(context, 1);
+    }
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "myPropertyName")) {
+        return JSValueMakeNumber(context, 1);
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "cantFind")) {
+        return JSValueMakeUndefined(context);
+    }
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "hasPropertyLie")) {
+        return 0;
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "throwOnGet")) {
+        return JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "0")) {
+        *exception = JSValueMakeNumber(context, 1);
+        return JSValueMakeNumber(context, 1);
+    }
+    
+    return JSValueMakeNull(context);
+}
+
+static bool MyObject_setProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(exception);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "cantSet"))
+        return true; // pretend we set the property in order to swallow it
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "throwOnSet")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+    }
+    
+    return false;
+}
+
+static bool MyObject_deleteProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "cantDelete"))
+        return true;
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "throwOnDelete")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return false;
+    }
+
+    return false;
+}
+
+static void MyObject_getPropertyNames(JSContextRef context, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    
+    JSStringRef propertyName;
+    
+    propertyName = JSStringCreateWithUTF8CString("alwaysOne");
+    JSPropertyNameAccumulatorAddName(propertyNames, propertyName);
+    JSStringRelease(propertyName);
+    
+    propertyName = JSStringCreateWithUTF8CString("myPropertyName");
+    JSPropertyNameAccumulatorAddName(propertyNames, propertyName);
+    JSStringRelease(propertyName);
+}
+
+static JSValueRef MyObject_callAsFunction(JSContextRef context, JSObjectRef object, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(exception);
+
+    if (argumentCount > 0 && JSValueIsString(context, arguments[0]) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, arguments[0], 0), "throwOnCall")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return JSValueMakeUndefined(context);
+    }
+
+    if (argumentCount > 0 && JSValueIsStrictEqual(context, arguments[0], JSValueMakeNumber(context, 0)))
+        return JSValueMakeNumber(context, 1);
+    
+    return JSValueMakeUndefined(context);
+}
+
+static JSObjectRef MyObject_callAsConstructor(JSContextRef context, JSObjectRef object, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+
+    if (argumentCount > 0 && JSValueIsString(context, arguments[0]) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, arguments[0], 0), "throwOnConstruct")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return object;
+    }
+
+    if (argumentCount > 0 && JSValueIsStrictEqual(context, arguments[0], JSValueMakeNumber(context, 0)))
+        return JSValueToObject(context, JSValueMakeNumber(context, 1), exception);
+    
+    return JSValueToObject(context, JSValueMakeNumber(context, 0), exception);
+}
+
+static bool MyObject_hasInstance(JSContextRef context, JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(constructor);
+
+    if (JSValueIsString(context, possibleValue) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, possibleValue, 0), "throwOnHasInstance")) {
+        JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), constructor, JSStringCreateWithUTF8CString("test script"), 1, exception);
+        return false;
+    }
+
+    JSStringRef numberString = JSStringCreateWithUTF8CString("Number");
+    JSObjectRef numberConstructor = JSValueToObject(context, JSObjectGetProperty(context, JSContextGetGlobalObject(context), numberString, exception), exception);
+    JSStringRelease(numberString);
+
+    return JSValueIsInstanceOfConstructor(context, possibleValue, numberConstructor, exception);
+}
+
+static JSValueRef MyObject_convertToType(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(exception);
+    
+    switch (type) {
+    case kJSTypeNumber:
+        return JSValueMakeNumber(context, 1);
+    case kJSTypeString:
+        {
+            JSStringRef string = JSStringCreateWithUTF8CString("MyObjectAsString");
+            JSValueRef result = JSValueMakeString(context, string);
+            JSStringRelease(string);
+            return result;
+        }
+    default:
+        break;
+    }
+
+    // Other conversions: return the null value. (Returning NULL here would instead
+    // forward the conversion to the default object class.)
+    return JSValueMakeNull(context);
+}
+
+static JSValueRef MyObject_convertToTypeWrapper(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(type);
+    UNUSED_PARAM(exception);
+    // Forward to default object class
+    return 0;
+}
+
+static bool MyObject_set_nullGetForwardSet(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(ctx);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(exception);
+    return false; // Forward to parent class.
+}
+
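+// Static entries whose callbacks are deliberately left NULL ("evil"): accessing them
+// should fall through to the class callbacks or the parent class instead of crashing.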
+static JSStaticValue evilStaticValues[] = {
+    { "nullGetSet", 0, 0, kJSPropertyAttributeNone },
+    { "nullGetForwardSet", 0, MyObject_set_nullGetForwardSet, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static JSStaticFunction evilStaticFunctions[] = {
+    { "nullCall", 0, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+JSClassDefinition MyObject_definition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "MyObject",
+    NULL,
+    
+    evilStaticValues,
+    evilStaticFunctions,
+    
+    NULL,
+    NULL,
+    MyObject_hasProperty,
+    MyObject_getProperty,
+    MyObject_setProperty,
+    MyObject_deleteProperty,
+    MyObject_getPropertyNames,
+    MyObject_callAsFunction,
+    MyObject_callAsConstructor,
+    MyObject_hasInstance,
+    MyObject_convertToType,
+};
+
+JSClassDefinition MyObject_convertToTypeWrapperDefinition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "MyObject",
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    MyObject_convertToTypeWrapper,
+};
+
+JSClassDefinition MyObject_nullWrapperDefinition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "MyObject",
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+};
+
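+// Chain three classes together: the base class supplies the callbacks defined above,
+// the middle wrapper overrides only convertToType, and the outer wrapper defines no
+// callbacks at all, so every operation must forward down the parentClass chain.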
+static JSClassRef MyObject_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassRef baseClass = JSClassCreate(&MyObject_definition);
+        MyObject_convertToTypeWrapperDefinition.parentClass = baseClass;
+        JSClassRef wrapperClass = JSClassCreate(&MyObject_convertToTypeWrapperDefinition);
+        MyObject_nullWrapperDefinition.parentClass = wrapperClass;
+        jsClass = JSClassCreate(&MyObject_nullWrapperDefinition);
+    }
+
+    return jsClass;
+}
+
+static JSValueRef PropertyCatchalls_getProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "x")) {
+        static size_t count;
+        if (count++ < 5)
+            return NULL;
+
+        // Swallow all .x gets after 5, returning null.
+        return JSValueMakeNull(context);
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "y")) {
+        static size_t count;
+        if (count++ < 5)
+            return NULL;
+
+        // Swallow all .y gets after 5, returning null.
+        return JSValueMakeNull(context);
+    }
+    
+    if (JSStringIsEqualToUTF8CString(propertyName, "z")) {
+        static size_t count;
+        if (count++ < 5)
+            return NULL;
+
+        // Swallow all .z gets after 5, returning null.
+        return JSValueMakeNull(context);
+    }
+
+    return NULL;
+}
+
+static bool PropertyCatchalls_setProperty(JSContextRef context, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+    UNUSED_PARAM(exception);
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "x")) {
+        static size_t count;
+        if (count++ < 5)
+            return false;
+
+        // Swallow all .x sets after 5.
+        return true;
+    }
+
+    if (JSStringIsEqualToUTF8CString(propertyName, "make_throw") || JSStringIsEqualToUTF8CString(propertyName, "0")) {
+        *exception = JSValueMakeNumber(context, 5);
+        return true;
+    }
+
+    return false;
+}
+
+static void PropertyCatchalls_getPropertyNames(JSContextRef context, JSObjectRef object, JSPropertyNameAccumulatorRef propertyNames)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(object);
+
+    static size_t count;
+    static const char* numbers[] = { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" };
+    
+    // Provide a property of a different name every time.
+    JSStringRef propertyName = JSStringCreateWithUTF8CString(numbers[count++ % 10]);
+    JSPropertyNameAccumulatorAddName(propertyNames, propertyName);
+    JSStringRelease(propertyName);
+}
+
+JSClassDefinition PropertyCatchalls_definition = {
+    0,
+    kJSClassAttributeNone,
+    
+    "PropertyCatchalls",
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    PropertyCatchalls_getProperty,
+    PropertyCatchalls_setProperty,
+    NULL,
+    PropertyCatchalls_getPropertyNames,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+};
+
+static JSClassRef PropertyCatchalls_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+
+    static JSClassRef jsClass;
+    if (!jsClass)
+        jsClass = JSClassCreate(&PropertyCatchalls_definition);
+    
+    return jsClass;
+}
+
+static bool EvilExceptionObject_hasInstance(JSContextRef context, JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(constructor);
+    
+    JSStringRef hasInstanceName = JSStringCreateWithUTF8CString("hasInstance");
+    JSValueRef hasInstance = JSObjectGetProperty(context, constructor, hasInstanceName, exception);
+    JSStringRelease(hasInstanceName);
+    if (!hasInstance)
+        return false;
+    JSObjectRef function = JSValueToObject(context, hasInstance, exception);
+    JSValueRef result = JSObjectCallAsFunction(context, function, constructor, 1, &possibleValue, exception);
+    return result && JSValueToBoolean(context, result);
+}
+
+static JSValueRef EvilExceptionObject_convertToType(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(exception);
+    JSStringRef funcName;
+    switch (type) {
+    case kJSTypeNumber:
+        funcName = JSStringCreateWithUTF8CString("toNumber");
+        break;
+    case kJSTypeString:
+        funcName = JSStringCreateWithUTF8CString("toStringExplicit");
+        break;
+    default:
+        return JSValueMakeNull(context);
+    }
+    
+    JSValueRef func = JSObjectGetProperty(context, object, funcName, exception);
+    JSStringRelease(funcName);    
+    JSObjectRef function = JSValueToObject(context, func, exception);
+    if (!function)
+        return JSValueMakeNull(context);
+    JSValueRef value = JSObjectCallAsFunction(context, function, object, 0, NULL, exception);
+    if (!value) {
+        JSStringRef errorString = JSStringCreateWithUTF8CString("convertToType failed"); 
+        JSValueRef errorStringRef = JSValueMakeString(context, errorString);
+        JSStringRelease(errorString);
+        return errorStringRef;
+    }
+    return value;
+}
+
+JSClassDefinition EvilExceptionObject_definition = {
+    0,
+    kJSClassAttributeNone,
+
+    "EvilExceptionObject",
+    NULL,
+
+    NULL,
+    NULL,
+
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    EvilExceptionObject_hasInstance,
+    EvilExceptionObject_convertToType,
+};
+
+static JSClassRef EvilExceptionObject_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+    
+    static JSClassRef jsClass;
+    if (!jsClass)
+        jsClass = JSClassCreate(&EvilExceptionObject_definition);
+    
+    return jsClass;
+}
+
+JSClassDefinition EmptyObject_definition = {
+    0,
+    kJSClassAttributeNone,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+};
+
+static JSClassRef EmptyObject_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+    
+    static JSClassRef jsClass;
+    if (!jsClass)
+        jsClass = JSClassCreate(&EmptyObject_definition);
+    
+    return jsClass;
+}
+
+
+static JSValueRef Base_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 1); // distinguish base get from derived get
+}
+
+static bool Base_set(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+
+    *exception = JSValueMakeNumber(ctx, 1); // distinguish base set from derived set
+    return true;
+}
+
+static JSValueRef Base_callAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return JSValueMakeNumber(ctx, 1); // distinguish base call from derived call
+}
+
+static JSValueRef Base_returnHardNull(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(ctx);
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return 0; // should convert to undefined!
+}
+
+static JSStaticFunction Base_staticFunctions[] = {
+    { "baseProtoDup", NULL, kJSPropertyAttributeNone },
+    { "baseProto", Base_callAsFunction, kJSPropertyAttributeNone },
+    { "baseHardNull", Base_returnHardNull, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+static JSStaticValue Base_staticValues[] = {
+    { "baseDup", Base_get, Base_set, kJSPropertyAttributeNone },
+    { "baseOnly", Base_get, Base_set, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static bool TestInitializeFinalize;
+static void Base_initialize(JSContextRef context, JSObjectRef object)
+{
+    UNUSED_PARAM(context);
+
+    if (TestInitializeFinalize) {
+        ASSERT((void*)1 == JSObjectGetPrivate(object));
+        JSObjectSetPrivate(object, (void*)2);
+    }
+}
+
+static unsigned Base_didFinalize;
+static void Base_finalize(JSObjectRef object)
+{
+    UNUSED_PARAM(object);
+    if (TestInitializeFinalize) {
+        ASSERT((void*)4 == JSObjectGetPrivate(object));
+        Base_didFinalize = true;
+    }
+}
+
+static JSClassRef Base_class(JSContextRef context)
+{
+    UNUSED_PARAM(context);
+
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassDefinition definition = kJSClassDefinitionEmpty;
+        definition.staticValues = Base_staticValues;
+        definition.staticFunctions = Base_staticFunctions;
+        definition.initialize = Base_initialize;
+        definition.finalize = Base_finalize;
+        jsClass = JSClassCreate(&definition);
+    }
+    return jsClass;
+}
+
+static JSValueRef Derived_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 2); // distinguish base get from derived get
+}
+
+static bool Derived_set(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(ctx);
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+
+    *exception = JSValueMakeNumber(ctx, 2); // distinguish base set from derived set
+    return true;
+}
+
+static JSValueRef Derived_callAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return JSValueMakeNumber(ctx, 2); // distinguish base call from derived call
+}
+
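+// Derived redefines "baseProtoDup" and "baseDup" from Base, and declares "protoDup"
+// as both a static function (with a NULL callback) and a static value, so the tests
+// can check how derived static entries shadow inherited and sibling entries.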
+static JSStaticFunction Derived_staticFunctions[] = {
+    { "protoOnly", Derived_callAsFunction, kJSPropertyAttributeNone },
+    { "protoDup", NULL, kJSPropertyAttributeNone },
+    { "baseProtoDup", Derived_callAsFunction, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+static JSStaticValue Derived_staticValues[] = {
+    { "derivedOnly", Derived_get, Derived_set, kJSPropertyAttributeNone },
+    { "protoDup", Derived_get, Derived_set, kJSPropertyAttributeNone },
+    { "baseDup", Derived_get, Derived_set, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static void Derived_initialize(JSContextRef context, JSObjectRef object)
+{
+    UNUSED_PARAM(context);
+
+    if (TestInitializeFinalize) {
+        ASSERT((void*)2 == JSObjectGetPrivate(object));
+        JSObjectSetPrivate(object, (void*)3);
+    }
+}
+
+static void Derived_finalize(JSObjectRef object)
+{
+    if (TestInitializeFinalize) {
+        ASSERT((void*)3 == JSObjectGetPrivate(object));
+        JSObjectSetPrivate(object, (void*)4);
+    }
+}
+
+static JSClassRef Derived_class(JSContextRef context)
+{
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassDefinition definition = kJSClassDefinitionEmpty;
+        definition.parentClass = Base_class(context);
+        definition.staticValues = Derived_staticValues;
+        definition.staticFunctions = Derived_staticFunctions;
+        definition.initialize = Derived_initialize;
+        definition.finalize = Derived_finalize;
+        jsClass = JSClassCreate(&definition);
+    }
+    return jsClass;
+}
+
+static JSClassRef Derived2_class(JSContextRef context)
+{
+    static JSClassRef jsClass;
+    if (!jsClass) {
+        JSClassDefinition definition = kJSClassDefinitionEmpty;
+        definition.parentClass = Derived_class(context);
+        jsClass = JSClassCreate(&definition);
+    }
+    return jsClass;
+}
+
+static JSValueRef print_callAsFunction(JSContextRef ctx, JSObjectRef functionObject, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(functionObject);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(exception);
+
+    ASSERT(JSContextGetGlobalContext(ctx) == context);
+    
+    if (argumentCount > 0) {
+        JSStringRef string = JSValueToStringCopy(ctx, arguments[0], NULL);
+        size_t sizeUTF8 = JSStringGetMaximumUTF8CStringSize(string);
+        char* stringUTF8 = (char*)malloc(sizeUTF8);
+        JSStringGetUTF8CString(string, stringUTF8, sizeUTF8);
+        printf("%s\n", stringUTF8);
+        free(stringUTF8);
+        JSStringRelease(string);
+    }
+    
+    return JSValueMakeUndefined(ctx);
+}
+
+static JSObjectRef myConstructor_callAsConstructor(JSContextRef context, JSObjectRef constructorObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(constructorObject);
+    UNUSED_PARAM(exception);
+    
+    JSObjectRef result = JSObjectMake(context, NULL, NULL);
+    if (argumentCount > 0) {
+        JSStringRef value = JSStringCreateWithUTF8CString("value");
+        JSObjectSetProperty(context, result, value, arguments[0], kJSPropertyAttributeNone, NULL);
+        JSStringRelease(value);
+    }
+    
+    return result;
+}
+
+static JSObjectRef myBadConstructor_callAsConstructor(JSContextRef context, JSObjectRef constructorObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(context);
+    UNUSED_PARAM(constructorObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    
+    return 0;
+}
+
+
+static void globalObject_initialize(JSContextRef context, JSObjectRef object)
+{
+    UNUSED_PARAM(object);
+    // Ensure that an execution context is passed in
+    ASSERT(context);
+
+    JSObjectRef globalObject = JSContextGetGlobalObject(context);
+    ASSERT(globalObject);
+
+    // Ensure that the standard global properties have been set on the global object
+    JSStringRef array = JSStringCreateWithUTF8CString("Array");
+    JSObjectRef arrayConstructor = JSValueToObject(context, JSObjectGetProperty(context, globalObject, array, NULL), NULL);
+    JSStringRelease(array);
+
+    UNUSED_PARAM(arrayConstructor);
+    ASSERT(arrayConstructor);
+}
+
+static JSValueRef globalObject_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 3);
+}
+
+static bool globalObject_set(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef value, JSValueRef* exception)
+{
+    UNUSED_PARAM(object);
+    UNUSED_PARAM(propertyName);
+    UNUSED_PARAM(value);
+
+    *exception = JSValueMakeNumber(ctx, 3);
+    return true;
+}
+
+static JSValueRef globalObject_call(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+
+    return JSValueMakeNumber(ctx, 3);
+}
+
+static JSValueRef functionGC(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    UNUSED_PARAM(exception);
+    JSGarbageCollect(context);
+    return JSValueMakeUndefined(context);
+}
+
+static JSStaticValue globalObject_staticValues[] = {
+    { "globalStaticValue", globalObject_get, globalObject_set, kJSPropertyAttributeNone },
+    { 0, 0, 0, 0 }
+};
+
+static JSStaticFunction globalObject_staticFunctions[] = {
+    { "globalStaticFunction", globalObject_call, kJSPropertyAttributeNone },
+    { "globalStaticFunction2", globalObject_call, kJSPropertyAttributeNone },
+    { "gc", functionGC, kJSPropertyAttributeNone },
+    { 0, 0, 0 }
+};
+
+static char* createStringWithContentsOfFile(const char* fileName);
+
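+// Constructing a Derived object runs Base_initialize first (private data 1 -> 2),
+// then Derived_initialize (2 -> 3); finalization runs in the opposite order (3 -> 4),
+// which Base_finalize records in Base_didFinalize.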
+static void testInitializeFinalize()
+{
+    JSObjectRef o = JSObjectMake(context, Derived_class(context), (void*)1);
+    UNUSED_PARAM(o);
+    ASSERT(JSObjectGetPrivate(o) == (void*)3);
+}
+
+static JSValueRef jsNumberValue =  NULL;
+
+static JSObjectRef aHeapRef = NULL;
+
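+// Protect a number value and keep it reachable only through the jsNumberValue global;
+// the local reference is cleared so nothing on the stack points at it, yet the value
+// must survive later garbage collections for as long as it stays protected.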
+static void makeGlobalNumberValue(JSContextRef context) {
+    JSValueRef v = JSValueMakeNumber(context, 420);
+    JSValueProtect(context, v);
+    jsNumberValue = v;
+    v = NULL;
+}
+
+bool assertTrue(bool value, const char* message)
+{
+    if (!value) {
+        if (message)
+            fprintf(stderr, "assertTrue failed: '%s'\n", message);
+        else
+            fprintf(stderr, "assertTrue failed.\n");
+        failed = 1;
+    }
+    return value;
+}
+
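+// JSObjectSetPrototype must refuse to create a prototype-chain cycle: each attempt
+// below should leave the chain acyclic, and closing a cycle from script via __proto__
+// should throw.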
+static bool checkForCycleInPrototypeChain()
+{
+    bool result = true;
+    JSGlobalContextRef context = JSGlobalContextCreate(0);
+    JSObjectRef object1 = JSObjectMake(context, /* jsClass */ 0, /* data */ 0);
+    JSObjectRef object2 = JSObjectMake(context, /* jsClass */ 0, /* data */ 0);
+    JSObjectRef object3 = JSObjectMake(context, /* jsClass */ 0, /* data */ 0);
+
+    JSObjectSetPrototype(context, object1, JSValueMakeNull(context));
+    ASSERT(JSValueIsNull(context, JSObjectGetPrototype(context, object1)));
+
+    // object1 -> object1
+    JSObjectSetPrototype(context, object1, object1);
+    result &= assertTrue(JSValueIsNull(context, JSObjectGetPrototype(context, object1)), "It is possible to assign self as a prototype");
+
+    // object1 -> object2 -> object1
+    JSObjectSetPrototype(context, object2, object1);
+    ASSERT(JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object2), object1));
+    JSObjectSetPrototype(context, object1, object2);
+    result &= assertTrue(JSValueIsNull(context, JSObjectGetPrototype(context, object1)), "It is possible to close a prototype chain cycle");
+
+    // object1 -> object2 -> object3 -> object1
+    JSObjectSetPrototype(context, object2, object3);
+    ASSERT(JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object2), object3));
+    JSObjectSetPrototype(context, object1, object2);
+    ASSERT(JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object1), object2));
+    JSObjectSetPrototype(context, object3, object1);
+    result &= assertTrue(!JSValueIsStrictEqual(context, JSObjectGetPrototype(context, object3), object1), "It is possible to close a prototype chain cycle");
+
+    JSValueRef exception;
+    JSStringRef code = JSStringCreateWithUTF8CString("o = { }; p = { }; o.__proto__ = p; p.__proto__ = o");
+    JSStringRef file = JSStringCreateWithUTF8CString("");
+    result &= assertTrue(!JSEvaluateScript(context, code, /* thisObject*/ 0, file, 1, &exception),
+                         "An exception should be thrown");
+
+    JSStringRelease(code);
+    JSStringRelease(file);
+    JSGlobalContextRelease(context);
+    return result;
+}
+
+static JSValueRef valueToObjectExceptionCallAsFunction(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
+{
+    UNUSED_PARAM(function);
+    UNUSED_PARAM(thisObject);
+    UNUSED_PARAM(argumentCount);
+    UNUSED_PARAM(arguments);
+    JSValueRef jsUndefined = JSValueMakeUndefined(JSContextGetGlobalContext(ctx));
+    JSValueToObject(JSContextGetGlobalContext(ctx), jsUndefined, exception);
+    
+    return JSValueMakeUndefined(ctx);
+}
+static bool valueToObjectExceptionTest()
+{
+    JSGlobalContextRef testContext;
+    JSClassDefinition globalObjectClassDefinition = kJSClassDefinitionEmpty;
+    globalObjectClassDefinition.initialize = globalObject_initialize;
+    globalObjectClassDefinition.staticValues = globalObject_staticValues;
+    globalObjectClassDefinition.staticFunctions = globalObject_staticFunctions;
+    globalObjectClassDefinition.attributes = kJSClassAttributeNoAutomaticPrototype;
+    JSClassRef globalObjectClass = JSClassCreate(&globalObjectClassDefinition);
+    testContext = JSGlobalContextCreateInGroup(NULL, globalObjectClass);
+    JSObjectRef globalObject = JSContextGetGlobalObject(testContext);
+
+    JSStringRef valueToObject = JSStringCreateWithUTF8CString("valueToObject");
+    JSObjectRef valueToObjectFunction = JSObjectMakeFunctionWithCallback(testContext, valueToObject, valueToObjectExceptionCallAsFunction);
+    JSObjectSetProperty(testContext, globalObject, valueToObject, valueToObjectFunction, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(valueToObject);
+
+    JSStringRef test = JSStringCreateWithUTF8CString("valueToObject();");
+    JSEvaluateScript(testContext, test, NULL, NULL, 1, NULL);
+    
+    JSStringRelease(test);
+    JSClassRelease(globalObjectClass);
+    JSGlobalContextRelease(testContext);
+    
+    return true;
+}
+
+static bool globalContextNameTest()
+{
+    bool result = true;
+    JSGlobalContextRef context = JSGlobalContextCreate(0);
+
+    JSStringRef str = JSGlobalContextCopyName(context);
+    result &= assertTrue(!str, "Default context name is NULL");
+
+    JSStringRef name1 = JSStringCreateWithUTF8CString("name1");
+    JSStringRef name2 = JSStringCreateWithUTF8CString("name2");
+
+    JSGlobalContextSetName(context, name1);
+    JSStringRef fetchName1 = JSGlobalContextCopyName(context);
+    JSGlobalContextSetName(context, name2);
+    JSStringRef fetchName2 = JSGlobalContextCopyName(context);
+    JSGlobalContextSetName(context, NULL);
+    JSStringRef fetchName3 = JSGlobalContextCopyName(context);
+
+    result &= assertTrue(JSStringIsEqual(name1, fetchName1), "Unexpected Context name");
+    result &= assertTrue(JSStringIsEqual(name2, fetchName2), "Unexpected Context name");
+    result &= assertTrue(!JSStringIsEqual(fetchName1, fetchName2), "Unexpected Context name");
+    result &= assertTrue(!fetchName3, "Unexpected Context name");
+
+    JSStringRelease(name1);
+    JSStringRelease(name2);
+    JSStringRelease(fetchName1);
+    JSStringRelease(fetchName2);
+
+    return result;
+}
+
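+// Exists only as a compile-time check that the name fields of JSStaticFunction and
+// JSStaticValue accept string literals (i.e. remain const char*).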
+static void checkConstnessInJSObjectNames()
+{
+    JSStaticFunction fun;
+    fun.name = "something";
+    JSStaticValue val;
+    val.name = "something";
+}
+
+int main(int argc, char* argv[])
+{
+#if OS(WINDOWS)
+    // Cygwin calls ::SetErrorMode(SEM_FAILCRITICALERRORS), which we will inherit. This is bad for
+    // testing/debugging, as it causes the post-mortem debugger not to be invoked. We reset the
+    // error mode here to work around Cygwin's behavior. See .
+    ::SetErrorMode(0);
+#endif
+
+    testCompareAndSwap();
+
+#if JSC_OBJC_API_ENABLED
+    testObjectiveCAPI();
+#endif
+
+    const char *scriptPath = "testapi.js";
+    if (argc > 1) {
+        scriptPath = argv[1];
+    }
+    
+    // Test garbage collection with a fresh context
+    context = JSGlobalContextCreateInGroup(NULL, NULL);
+    TestInitializeFinalize = true;
+    testInitializeFinalize();
+    JSGlobalContextRelease(context);
+    TestInitializeFinalize = false;
+
+    ASSERT(Base_didFinalize);
+
+    JSClassDefinition globalObjectClassDefinition = kJSClassDefinitionEmpty;
+    globalObjectClassDefinition.initialize = globalObject_initialize;
+    globalObjectClassDefinition.staticValues = globalObject_staticValues;
+    globalObjectClassDefinition.staticFunctions = globalObject_staticFunctions;
+    globalObjectClassDefinition.attributes = kJSClassAttributeNoAutomaticPrototype;
+    JSClassRef globalObjectClass = JSClassCreate(&globalObjectClassDefinition);
+    context = JSGlobalContextCreateInGroup(NULL, globalObjectClass);
+
+    JSContextGroupRef contextGroup = JSContextGetGroup(context);
+    
+    JSGlobalContextRetain(context);
+    JSGlobalContextRelease(context);
+    ASSERT(JSContextGetGlobalContext(context) == context);
+    
+    JSReportExtraMemoryCost(context, 0);
+    JSReportExtraMemoryCost(context, 1);
+    JSReportExtraMemoryCost(context, 1024);
+
+    JSObjectRef globalObject = JSContextGetGlobalObject(context);
+    ASSERT(JSValueIsObject(context, globalObject));
+    
+    JSValueRef jsUndefined = JSValueMakeUndefined(context);
+    JSValueRef jsNull = JSValueMakeNull(context);
+    JSValueRef jsTrue = JSValueMakeBoolean(context, true);
+    JSValueRef jsFalse = JSValueMakeBoolean(context, false);
+    JSValueRef jsZero = JSValueMakeNumber(context, 0);
+    JSValueRef jsOne = JSValueMakeNumber(context, 1);
+    JSValueRef jsOneThird = JSValueMakeNumber(context, 1.0 / 3.0);
+    JSObjectRef jsObjectNoProto = JSObjectMake(context, NULL, NULL);
+    JSObjectSetPrototype(context, jsObjectNoProto, JSValueMakeNull(context));
+
+    JSObjectSetPrivate(globalObject, (void*)123);
+    if (JSObjectGetPrivate(globalObject) != (void*)123) {
+        printf("FAIL: Didn't return private data when set by JSObjectSetPrivate().\n");
+        failed = 1;
+    } else
+        printf("PASS: returned private data when set by JSObjectSetPrivate().\n");
+
+    // FIXME: test funny utf8 characters
+    JSStringRef jsEmptyIString = JSStringCreateWithUTF8CString("");
+    JSValueRef jsEmptyString = JSValueMakeString(context, jsEmptyIString);
+    
+    JSStringRef jsOneIString = JSStringCreateWithUTF8CString("1");
+    JSValueRef jsOneString = JSValueMakeString(context, jsOneIString);
+
+    UniChar singleUniChar = 65; // Capital A
+    CFMutableStringRef cfString = 
+        CFStringCreateMutableWithExternalCharactersNoCopy(kCFAllocatorDefault,
+                                                          &singleUniChar,
+                                                          1,
+                                                          1,
+                                                          kCFAllocatorNull);
+
+    JSStringRef jsCFIString = JSStringCreateWithCFString(cfString);
+    JSValueRef jsCFString = JSValueMakeString(context, jsCFIString);
+    
+    CFStringRef cfEmptyString = CFStringCreateWithCString(kCFAllocatorDefault, "", kCFStringEncodingUTF8);
+    
+    JSStringRef jsCFEmptyIString = JSStringCreateWithCFString(cfEmptyString);
+    JSValueRef jsCFEmptyString = JSValueMakeString(context, jsCFEmptyIString);
+
+    CFIndex cfStringLength = CFStringGetLength(cfString);
+    UniChar* buffer = (UniChar*)malloc(cfStringLength * sizeof(UniChar));
+    CFStringGetCharacters(cfString, 
+                          CFRangeMake(0, cfStringLength), 
+                          buffer);
+    JSStringRef jsCFIStringWithCharacters = JSStringCreateWithCharacters((JSChar*)buffer, cfStringLength);
+    JSValueRef jsCFStringWithCharacters = JSValueMakeString(context, jsCFIStringWithCharacters);
+    
+    JSStringRef jsCFEmptyIStringWithCharacters = JSStringCreateWithCharacters((JSChar*)buffer, CFStringGetLength(cfEmptyString));
+    free(buffer);
+    JSValueRef jsCFEmptyStringWithCharacters = JSValueMakeString(context, jsCFEmptyIStringWithCharacters);
+
+    JSChar constantString[] = { 'H', 'e', 'l', 'l', 'o', };
+    JSStringRef constantStringRef = JSStringCreateWithCharactersNoCopy(constantString, sizeof(constantString) / sizeof(constantString[0]));
+    ASSERT(JSStringGetCharactersPtr(constantStringRef) == constantString);
+    JSStringRelease(constantStringRef);
+
+    ASSERT(JSValueGetType(context, NULL) == kJSTypeNull);
+    ASSERT(JSValueGetType(context, jsUndefined) == kJSTypeUndefined);
+    ASSERT(JSValueGetType(context, jsNull) == kJSTypeNull);
+    ASSERT(JSValueGetType(context, jsTrue) == kJSTypeBoolean);
+    ASSERT(JSValueGetType(context, jsFalse) == kJSTypeBoolean);
+    ASSERT(JSValueGetType(context, jsZero) == kJSTypeNumber);
+    ASSERT(JSValueGetType(context, jsOne) == kJSTypeNumber);
+    ASSERT(JSValueGetType(context, jsOneThird) == kJSTypeNumber);
+    ASSERT(JSValueGetType(context, jsEmptyString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsOneString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFStringWithCharacters) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFEmptyString) == kJSTypeString);
+    ASSERT(JSValueGetType(context, jsCFEmptyStringWithCharacters) == kJSTypeString);
+
+    ASSERT(!JSValueIsBoolean(context, NULL));
+    ASSERT(!JSValueIsObject(context, NULL));
+    ASSERT(!JSValueIsArray(context, NULL));
+    ASSERT(!JSValueIsDate(context, NULL));
+    ASSERT(!JSValueIsString(context, NULL));
+    ASSERT(!JSValueIsNumber(context, NULL));
+    ASSERT(!JSValueIsUndefined(context, NULL));
+    ASSERT(JSValueIsNull(context, NULL));
+    ASSERT(!JSObjectCallAsFunction(context, NULL, NULL, 0, NULL, NULL));
+    ASSERT(!JSObjectCallAsConstructor(context, NULL, 0, NULL, NULL));
+    ASSERT(!JSObjectIsConstructor(context, NULL));
+    ASSERT(!JSObjectIsFunction(context, NULL));
+
+    JSStringRef nullString = JSStringCreateWithUTF8CString(0);
+    const JSChar* characters = JSStringGetCharactersPtr(nullString);
+    if (characters) {
+        printf("FAIL: Didn't return null when accessing character pointer of a null String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned null when accessing character pointer of a null String.\n");
+
+    JSStringRef emptyString = JSStringCreateWithCFString(CFSTR(""));
+    characters = JSStringGetCharactersPtr(emptyString);
+    if (!characters) {
+        printf("FAIL: Returned null when accessing character pointer of an empty String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned empty when accessing character pointer of an empty String.\n");
+
+    size_t length = JSStringGetLength(nullString);
+    if (length) {
+        printf("FAIL: Didn't return 0 length for null String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned 0 length for null String.\n");
+    JSStringRelease(nullString);
+
+    length = JSStringGetLength(emptyString);
+    if (length) {
+        printf("FAIL: Didn't return 0 length for empty String.\n");
+        failed = 1;
+    } else
+        printf("PASS: returned 0 length for empty String.\n");
+    JSStringRelease(emptyString);
+
+    JSObjectRef propertyCatchalls = JSObjectMake(context, PropertyCatchalls_class(context), NULL);
+    JSStringRef propertyCatchallsString = JSStringCreateWithUTF8CString("PropertyCatchalls");
+    JSObjectSetProperty(context, globalObject, propertyCatchallsString, propertyCatchalls, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(propertyCatchallsString);
+
+    JSObjectRef myObject = JSObjectMake(context, MyObject_class(context), NULL);
+    JSStringRef myObjectIString = JSStringCreateWithUTF8CString("MyObject");
+    JSObjectSetProperty(context, globalObject, myObjectIString, myObject, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(myObjectIString);
+    
+    JSObjectRef EvilExceptionObject = JSObjectMake(context, EvilExceptionObject_class(context), NULL);
+    JSStringRef EvilExceptionObjectIString = JSStringCreateWithUTF8CString("EvilExceptionObject");
+    JSObjectSetProperty(context, globalObject, EvilExceptionObjectIString, EvilExceptionObject, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(EvilExceptionObjectIString);
+    
+    JSObjectRef EmptyObject = JSObjectMake(context, EmptyObject_class(context), NULL);
+    JSStringRef EmptyObjectIString = JSStringCreateWithUTF8CString("EmptyObject");
+    JSObjectSetProperty(context, globalObject, EmptyObjectIString, EmptyObject, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(EmptyObjectIString);
+    
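+    // Stash an array in a private property of myObject, drop the only stack reference,
+    // and allocate heavily before collecting, to check that private properties keep
+    // their values alive across GC.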
+    JSStringRef lengthStr = JSStringCreateWithUTF8CString("length");
+    JSObjectRef aStackRef = JSObjectMakeArray(context, 0, 0, 0);
+    aHeapRef = aStackRef;
+    JSObjectSetProperty(context, aHeapRef, lengthStr, JSValueMakeNumber(context, 10), 0, 0);
+    JSStringRef privatePropertyName = JSStringCreateWithUTF8CString("privateProperty");
+    if (!JSObjectSetPrivateProperty(context, myObject, privatePropertyName, aHeapRef)) {
+        printf("FAIL: Could not set private property.\n");
+        failed = 1;
+    } else
+        printf("PASS: Set private property.\n");
+    aStackRef = 0;
+    if (JSObjectSetPrivateProperty(context, aHeapRef, privatePropertyName, aHeapRef)) {
+        printf("FAIL: JSObjectSetPrivateProperty should fail on non-API objects.\n");
+        failed = 1;
+    } else
+        printf("PASS: Did not allow JSObjectSetPrivateProperty on a non-API object.\n");
+    if (JSObjectGetPrivateProperty(context, myObject, privatePropertyName) != aHeapRef) {
+        printf("FAIL: Could not retrieve private property.\n");
+        failed = 1;
+    } else
+        printf("PASS: Retrieved private property.\n");
+    if (JSObjectGetPrivateProperty(context, aHeapRef, privatePropertyName)) {
+        printf("FAIL: JSObjectGetPrivateProperty should return NULL when called on a non-API object.\n");
+        failed = 1;
+    } else
+        printf("PASS: JSObjectGetPrivateProperty return NULL.\n");
+
+    if (JSObjectGetProperty(context, myObject, privatePropertyName, 0) == aHeapRef) {
+        printf("FAIL: Accessed private property through ordinary property lookup.\n");
+        failed = 1;
+    } else
+        printf("PASS: Cannot access private property through ordinary property lookup.\n");
+
+    JSGarbageCollect(context);
+
+    for (int i = 0; i < 10000; i++)
+        JSObjectMake(context, 0, 0);
+
+    aHeapRef = JSValueToObject(context, JSObjectGetPrivateProperty(context, myObject, privatePropertyName), 0);
+    if (JSValueToNumber(context, JSObjectGetProperty(context, aHeapRef, lengthStr, 0), 0) != 10) {
+        printf("FAIL: Private property has been collected.\n");
+        failed = 1;
+    } else
+        printf("PASS: Private property does not appear to have been collected.\n");
+    JSStringRelease(lengthStr);
+
+    if (!JSObjectSetPrivateProperty(context, myObject, privatePropertyName, 0)) {
+        printf("FAIL: Could not set private property to NULL.\n");
+        failed = 1;
+    } else
+        printf("PASS: Set private property to NULL.\n");
+    if (JSObjectGetPrivateProperty(context, myObject, privatePropertyName)) {
+        printf("FAIL: Could not retrieve private property.\n");
+        failed = 1;
+    } else
+        printf("PASS: Retrieved private property.\n");
+
+    JSStringRef nullJSON = JSStringCreateWithUTF8CString(0);
+    JSValueRef nullJSONObject = JSValueMakeFromJSONString(context, nullJSON);
+    if (nullJSONObject) {
+        printf("FAIL: Did not parse null String as JSON correctly\n");
+        failed = 1;
+    } else
+        printf("PASS: Parsed null String as JSON correctly.\n");
+    JSStringRelease(nullJSON);
+
+    JSStringRef validJSON = JSStringCreateWithUTF8CString("{\"aProperty\":true}");
+    JSValueRef jsonObject = JSValueMakeFromJSONString(context, validJSON);
+    JSStringRelease(validJSON);
+    if (!JSValueIsObject(context, jsonObject)) {
+        printf("FAIL: Did not parse valid JSON correctly\n");
+        failed = 1;
+    } else
+        printf("PASS: Parsed valid JSON string.\n");
+    JSStringRef propertyName = JSStringCreateWithUTF8CString("aProperty");
+    assertEqualsAsBoolean(JSObjectGetProperty(context, JSValueToObject(context, jsonObject, 0), propertyName, 0), true);
+    JSStringRelease(propertyName);
+    JSStringRef invalidJSON = JSStringCreateWithUTF8CString("fail!");
+    if (JSValueMakeFromJSONString(context, invalidJSON)) {
+        printf("FAIL: Should return null for invalid JSON data\n");
+        failed = 1;
+    } else
+        printf("PASS: Correctly returned null for invalid JSON data.\n");
+    JSValueRef exception = NULL;
+    JSStringRef str = JSValueCreateJSONString(context, jsonObject, 0, 0);
+    if (!JSStringIsEqualToUTF8CString(str, "{\"aProperty\":true}")) {
+        printf("FAIL: Did not correctly serialise with indent of 0.\n");
+        failed = 1;
+    } else
+        printf("PASS: Correctly serialised with indent of 0.\n");
+    JSStringRelease(str);
+
+    str = JSValueCreateJSONString(context, jsonObject, 4, 0);
+    if (!JSStringIsEqualToUTF8CString(str, "{\n    \"aProperty\": true\n}")) {
+        printf("FAIL: Did not correctly serialise with indent of 4.\n");
+        failed = 1;
+    } else
+        printf("PASS: Correctly serialised with indent of 4.\n");
+    JSStringRelease(str);
+
+    str = JSStringCreateWithUTF8CString("({get a(){ throw '';}})");
+    JSValueRef unstringifiableObj = JSEvaluateScript(context, str, NULL, NULL, 1, NULL);
+    JSStringRelease(str);
+    
+    str = JSValueCreateJSONString(context, unstringifiableObj, 4, 0);
+    if (str) {
+        printf("FAIL: Didn't return null when attempting to serialize unserializable value.\n");
+        JSStringRelease(str);
+        failed = 1;
+    } else
+        printf("PASS: returned null when attempting to serialize unserializable value.\n");
+    
+    str = JSValueCreateJSONString(context, unstringifiableObj, 4, &exception);
+    if (str) {
+        printf("FAIL: Didn't return null when attempting to serialize unserializable value.\n");
+        JSStringRelease(str);
+        failed = 1;
+    } else
+        printf("PASS: returned null when attempting to serialize unserializable value.\n");
+    if (!exception) {
+        printf("FAIL: Did not set exception on serialisation error\n");
+        failed = 1;
+    } else
+        printf("PASS: set exception on serialisation error\n");
+    // Conversions that throw exceptions
+    exception = NULL;
+    ASSERT(NULL == JSValueToObject(context, jsNull, &exception));
+    ASSERT(exception);
+    
+    exception = NULL;
+    // FIXME  - On i386 the isnan(double) macro tries to map to the isnan(float) function,
+    // causing a build break with -Wshorten-64-to-32 enabled.  The issue is known by the appropriate team.
+    // After that's resolved, we can remove these casts
+    ASSERT(isnan((float)JSValueToNumber(context, jsObjectNoProto, &exception)));
+    ASSERT(exception);
+
+    exception = NULL;
+    ASSERT(!JSValueToStringCopy(context, jsObjectNoProto, &exception));
+    ASSERT(exception);
+    
+    ASSERT(JSValueToBoolean(context, myObject));
+    
+    exception = NULL;
+    ASSERT(!JSValueIsEqual(context, jsObjectNoProto, JSValueMakeNumber(context, 1), &exception));
+    ASSERT(exception);
+    
+    exception = NULL;
+    JSObjectGetPropertyAtIndex(context, myObject, 0, &exception);
+    ASSERT(1 == JSValueToNumber(context, exception, NULL));
+
+    assertEqualsAsBoolean(jsUndefined, false);
+    assertEqualsAsBoolean(jsNull, false);
+    assertEqualsAsBoolean(jsTrue, true);
+    assertEqualsAsBoolean(jsFalse, false);
+    assertEqualsAsBoolean(jsZero, false);
+    assertEqualsAsBoolean(jsOne, true);
+    assertEqualsAsBoolean(jsOneThird, true);
+    assertEqualsAsBoolean(jsEmptyString, false);
+    assertEqualsAsBoolean(jsOneString, true);
+    assertEqualsAsBoolean(jsCFString, true);
+    assertEqualsAsBoolean(jsCFStringWithCharacters, true);
+    assertEqualsAsBoolean(jsCFEmptyString, false);
+    assertEqualsAsBoolean(jsCFEmptyStringWithCharacters, false);
+    
+    assertEqualsAsNumber(jsUndefined, nan(""));
+    assertEqualsAsNumber(jsNull, 0);
+    assertEqualsAsNumber(jsTrue, 1);
+    assertEqualsAsNumber(jsFalse, 0);
+    assertEqualsAsNumber(jsZero, 0);
+    assertEqualsAsNumber(jsOne, 1);
+    assertEqualsAsNumber(jsOneThird, 1.0 / 3.0);
+    assertEqualsAsNumber(jsEmptyString, 0);
+    assertEqualsAsNumber(jsOneString, 1);
+    assertEqualsAsNumber(jsCFString, nan(""));
+    assertEqualsAsNumber(jsCFStringWithCharacters, nan(""));
+    assertEqualsAsNumber(jsCFEmptyString, 0);
+    assertEqualsAsNumber(jsCFEmptyStringWithCharacters, 0);
+    ASSERT(sizeof(JSChar) == sizeof(UniChar));
+    
+    assertEqualsAsCharactersPtr(jsUndefined, "undefined");
+    assertEqualsAsCharactersPtr(jsNull, "null");
+    assertEqualsAsCharactersPtr(jsTrue, "true");
+    assertEqualsAsCharactersPtr(jsFalse, "false");
+    assertEqualsAsCharactersPtr(jsZero, "0");
+    assertEqualsAsCharactersPtr(jsOne, "1");
+    assertEqualsAsCharactersPtr(jsOneThird, "0.3333333333333333");
+    assertEqualsAsCharactersPtr(jsEmptyString, "");
+    assertEqualsAsCharactersPtr(jsOneString, "1");
+    assertEqualsAsCharactersPtr(jsCFString, "A");
+    assertEqualsAsCharactersPtr(jsCFStringWithCharacters, "A");
+    assertEqualsAsCharactersPtr(jsCFEmptyString, "");
+    assertEqualsAsCharactersPtr(jsCFEmptyStringWithCharacters, "");
+    
+    assertEqualsAsUTF8String(jsUndefined, "undefined");
+    assertEqualsAsUTF8String(jsNull, "null");
+    assertEqualsAsUTF8String(jsTrue, "true");
+    assertEqualsAsUTF8String(jsFalse, "false");
+    assertEqualsAsUTF8String(jsZero, "0");
+    assertEqualsAsUTF8String(jsOne, "1");
+    assertEqualsAsUTF8String(jsOneThird, "0.3333333333333333");
+    assertEqualsAsUTF8String(jsEmptyString, "");
+    assertEqualsAsUTF8String(jsOneString, "1");
+    assertEqualsAsUTF8String(jsCFString, "A");
+    assertEqualsAsUTF8String(jsCFStringWithCharacters, "A");
+    assertEqualsAsUTF8String(jsCFEmptyString, "");
+    assertEqualsAsUTF8String(jsCFEmptyStringWithCharacters, "");
+    
+    checkConstnessInJSObjectNames();
+    
+    ASSERT(JSValueIsStrictEqual(context, jsTrue, jsTrue));
+    ASSERT(!JSValueIsStrictEqual(context, jsOne, jsOneString));
+
+    ASSERT(JSValueIsEqual(context, jsOne, jsOneString, NULL));
+    ASSERT(!JSValueIsEqual(context, jsTrue, jsFalse, NULL));
+    
+    CFStringRef cfJSString = JSStringCopyCFString(kCFAllocatorDefault, jsCFIString);
+    CFStringRef cfJSEmptyString = JSStringCopyCFString(kCFAllocatorDefault, jsCFEmptyIString);
+    ASSERT(CFEqual(cfJSString, cfString));
+    ASSERT(CFEqual(cfJSEmptyString, cfEmptyString));
+    CFRelease(cfJSString);
+    CFRelease(cfJSEmptyString);
+
+    CFRelease(cfString);
+    CFRelease(cfEmptyString);
+    
+    jsGlobalValue = JSObjectMake(context, NULL, NULL);
+    makeGlobalNumberValue(context);
+    JSValueProtect(context, jsGlobalValue);
+    JSGarbageCollect(context);
+    ASSERT(JSValueIsObject(context, jsGlobalValue));
+    JSValueUnprotect(context, jsGlobalValue);
+    JSValueUnprotect(context, jsNumberValue);
+
+    JSStringRef goodSyntax = JSStringCreateWithUTF8CString("x = 1;");
+    const char* badSyntaxConstant = "x := 1;";
+    JSStringRef badSyntax = JSStringCreateWithUTF8CString(badSyntaxConstant);
+    ASSERT(JSCheckScriptSyntax(context, goodSyntax, NULL, 0, NULL));
+    ASSERT(!JSCheckScriptSyntax(context, badSyntax, NULL, 0, NULL));
+    ASSERT(!JSScriptCreateFromString(contextGroup, 0, 0, badSyntax, 0, 0));
+    ASSERT(!JSScriptCreateReferencingImmortalASCIIText(contextGroup, 0, 0, badSyntaxConstant, strlen(badSyntaxConstant), 0, 0));
+
+    JSValueRef result;
+    JSValueRef v;
+    JSObjectRef o;
+    JSStringRef string;
+
+    result = JSEvaluateScript(context, goodSyntax, NULL, NULL, 1, NULL);
+    ASSERT(result);
+    ASSERT(JSValueIsEqual(context, result, jsOne, NULL));
+
+    exception = NULL;
+    result = JSEvaluateScript(context, badSyntax, NULL, NULL, 1, &exception);
+    ASSERT(!result);
+    ASSERT(JSValueIsObject(context, exception));
+    
+    JSStringRef array = JSStringCreateWithUTF8CString("Array");
+    JSObjectRef arrayConstructor = JSValueToObject(context, JSObjectGetProperty(context, globalObject, array, NULL), NULL);
+    JSStringRelease(array);
+    result = JSObjectCallAsConstructor(context, arrayConstructor, 0, NULL, NULL);
+    ASSERT(result);
+    ASSERT(JSValueIsObject(context, result));
+    ASSERT(JSValueIsInstanceOfConstructor(context, result, arrayConstructor, NULL));
+    ASSERT(!JSValueIsInstanceOfConstructor(context, JSValueMakeNull(context), arrayConstructor, NULL));
+
+    o = JSValueToObject(context, result, NULL);
+    exception = NULL;
+    ASSERT(JSValueIsUndefined(context, JSObjectGetPropertyAtIndex(context, o, 0, &exception)));
+    ASSERT(!exception);
+    
+    JSObjectSetPropertyAtIndex(context, o, 0, JSValueMakeNumber(context, 1), &exception);
+    ASSERT(!exception);
+    
+    exception = NULL;
+    ASSERT(1 == JSValueToNumber(context, JSObjectGetPropertyAtIndex(context, o, 0, &exception), &exception));
+    ASSERT(!exception);
+
+    JSStringRef functionBody;
+    JSObjectRef function;
+    
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("rreturn Array;");
+    JSStringRef line = JSStringCreateWithUTF8CString("line");
+    ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception));
+    ASSERT(JSValueIsObject(context, exception));
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL);
+    assertEqualsAsNumber(v, 2);
+    JSStringRelease(functionBody);
+    JSStringRelease(line);
+
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("rreturn Array;");
+    line = JSStringCreateWithUTF8CString("line");
+    ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, -42, &exception));
+    ASSERT(JSValueIsObject(context, exception));
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL);
+    assertEqualsAsNumber(v, 2);
+    JSStringRelease(functionBody);
+    JSStringRelease(line);
+
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("// Line one.\nrreturn Array;");
+    line = JSStringCreateWithUTF8CString("line");
+    ASSERT(!JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception));
+    ASSERT(JSValueIsObject(context, exception));
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), line, NULL);
+    assertEqualsAsNumber(v, 3);
+    JSStringRelease(functionBody);
+    JSStringRelease(line);
+
+    exception = NULL;
+    functionBody = JSStringCreateWithUTF8CString("return Array;");
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, &exception);
+    JSStringRelease(functionBody);
+    ASSERT(!exception);
+    ASSERT(JSObjectIsFunction(context, function));
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, NULL);
+    ASSERT(v);
+    ASSERT(JSValueIsEqual(context, v, arrayConstructor, NULL));
+    
+    exception = NULL;
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, jsEmptyIString, NULL, 0, &exception);
+    ASSERT(!exception);
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, &exception);
+    ASSERT(v && !exception);
+    ASSERT(JSValueIsUndefined(context, v));
+    
+    exception = NULL;
+    v = NULL;
+    JSStringRef foo = JSStringCreateWithUTF8CString("foo");
+    JSStringRef argumentNames[] = { foo };
+    functionBody = JSStringCreateWithUTF8CString("return foo;");
+    function = JSObjectMakeFunction(context, foo, 1, argumentNames, functionBody, NULL, 1, &exception);
+    ASSERT(function && !exception);
+    JSValueRef arguments[] = { JSValueMakeNumber(context, 2) };
+    JSObjectCallAsFunction(context, function, NULL, 1, arguments, &exception);
+    JSStringRelease(foo);
+    JSStringRelease(functionBody);
+    
+    string = JSValueToStringCopy(context, function, NULL);
+    assertEqualsAsUTF8String(JSValueMakeString(context, string), "function foo(foo) {\nreturn foo;\n}");
+    JSStringRelease(string);
+
+    JSStringRef print = JSStringCreateWithUTF8CString("print");
+    JSObjectRef printFunction = JSObjectMakeFunctionWithCallback(context, print, print_callAsFunction);
+    JSObjectSetProperty(context, globalObject, print, printFunction, kJSPropertyAttributeNone, NULL); 
+    JSStringRelease(print);
+    
+    ASSERT(!JSObjectSetPrivate(printFunction, (void*)1));
+    ASSERT(!JSObjectGetPrivate(printFunction));
+
+    JSStringRef myConstructorIString = JSStringCreateWithUTF8CString("MyConstructor");
+    JSObjectRef myConstructor = JSObjectMakeConstructor(context, NULL, myConstructor_callAsConstructor);
+    JSObjectSetProperty(context, globalObject, myConstructorIString, myConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(myConstructorIString);
+    
+    JSStringRef myBadConstructorIString = JSStringCreateWithUTF8CString("MyBadConstructor");
+    JSObjectRef myBadConstructor = JSObjectMakeConstructor(context, NULL, myBadConstructor_callAsConstructor);
+    JSObjectSetProperty(context, globalObject, myBadConstructorIString, myBadConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(myBadConstructorIString);
+    
+    ASSERT(!JSObjectSetPrivate(myConstructor, (void*)1));
+    ASSERT(!JSObjectGetPrivate(myConstructor));
+    
+    string = JSStringCreateWithUTF8CString("Base");
+    JSObjectRef baseConstructor = JSObjectMakeConstructor(context, Base_class(context), NULL);
+    JSObjectSetProperty(context, globalObject, string, baseConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(string);
+    
+    string = JSStringCreateWithUTF8CString("Derived");
+    JSObjectRef derivedConstructor = JSObjectMakeConstructor(context, Derived_class(context), NULL);
+    JSObjectSetProperty(context, globalObject, string, derivedConstructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(string);
+    
+    string = JSStringCreateWithUTF8CString("Derived2");
+    JSObjectRef derived2Constructor = JSObjectMakeConstructor(context, Derived2_class(context), NULL);
+    JSObjectSetProperty(context, globalObject, string, derived2Constructor, kJSPropertyAttributeNone, NULL);
+    JSStringRelease(string);
+
+    o = JSObjectMake(context, NULL, NULL);
+    JSObjectSetProperty(context, o, jsOneIString, JSValueMakeNumber(context, 1), kJSPropertyAttributeNone, NULL);
+    JSObjectSetProperty(context, o, jsCFIString,  JSValueMakeNumber(context, 1), kJSPropertyAttributeDontEnum, NULL);
+    JSPropertyNameArrayRef nameArray = JSObjectCopyPropertyNames(context, o);
+    size_t expectedCount = JSPropertyNameArrayGetCount(nameArray);
+    size_t count;
+    for (count = 0; count < expectedCount; ++count)
+        JSPropertyNameArrayGetNameAtIndex(nameArray, count);
+    JSPropertyNameArrayRelease(nameArray);
+    ASSERT(count == 1); // jsCFString should not be enumerated
+
+    JSValueRef argumentsArrayValues[] = { JSValueMakeNumber(context, 10), JSValueMakeNumber(context, 20) };
+    o = JSObjectMakeArray(context, sizeof(argumentsArrayValues) / sizeof(JSValueRef), argumentsArrayValues, NULL);
+    string = JSStringCreateWithUTF8CString("length");
+    v = JSObjectGetProperty(context, o, string, NULL);
+    assertEqualsAsNumber(v, 2);
+    v = JSObjectGetPropertyAtIndex(context, o, 0, NULL);
+    assertEqualsAsNumber(v, 10);
+    v = JSObjectGetPropertyAtIndex(context, o, 1, NULL);
+    assertEqualsAsNumber(v, 20);
+
+    o = JSObjectMakeArray(context, 0, NULL, NULL);
+    v = JSObjectGetProperty(context, o, string, NULL);
+    assertEqualsAsNumber(v, 0);
+    JSStringRelease(string);
+
+    JSValueRef argumentsDateValues[] = { JSValueMakeNumber(context, 0) };
+    o = JSObjectMakeDate(context, 1, argumentsDateValues, NULL);
+    if (timeZoneIsPST())
+        assertEqualsAsUTF8String(o, "Wed Dec 31 1969 16:00:00 GMT-0800 (PST)");
+
+    string = JSStringCreateWithUTF8CString("an error message");
+    JSValueRef argumentsErrorValues[] = { JSValueMakeString(context, string) };
+    o = JSObjectMakeError(context, 1, argumentsErrorValues, NULL);
+    assertEqualsAsUTF8String(o, "Error: an error message");
+    JSStringRelease(string);
+
+    string = JSStringCreateWithUTF8CString("foo");
+    JSStringRef string2 = JSStringCreateWithUTF8CString("gi");
+    JSValueRef argumentsRegExpValues[] = { JSValueMakeString(context, string), JSValueMakeString(context, string2) };
+    o = JSObjectMakeRegExp(context, 2, argumentsRegExpValues, NULL);
+    assertEqualsAsUTF8String(o, "/foo/gi");
+    JSStringRelease(string);
+    JSStringRelease(string2);
+
+    JSClassDefinition nullDefinition = kJSClassDefinitionEmpty;
+    nullDefinition.attributes = kJSClassAttributeNoAutomaticPrototype;
+    JSClassRef nullClass = JSClassCreate(&nullDefinition);
+    JSClassRelease(nullClass);
+    
+    nullDefinition = kJSClassDefinitionEmpty;
+    nullClass = JSClassCreate(&nullDefinition);
+    JSClassRelease(nullClass);
+
+    functionBody = JSStringCreateWithUTF8CString("return this;");
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, NULL);
+    JSStringRelease(functionBody);
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSObjectCallAsFunction(context, function, o, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+
+    functionBody = JSStringCreateWithUTF8CString("return eval(\"this\");");
+    function = JSObjectMakeFunction(context, NULL, 0, NULL, functionBody, NULL, 1, NULL);
+    JSStringRelease(functionBody);
+    v = JSObjectCallAsFunction(context, function, NULL, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSObjectCallAsFunction(context, function, o, 0, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+
+    const char* thisScript = "this;";
+    JSStringRef script = JSStringCreateWithUTF8CString(thisScript);
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSEvaluateScript(context, script, o, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+    JSStringRelease(script);
+
+    JSScriptRef scriptObject = JSScriptCreateReferencingImmortalASCIIText(contextGroup, 0, 0, thisScript, strlen(thisScript), 0, 0);
+    v = JSScriptEvaluate(context, scriptObject, NULL, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSScriptEvaluate(context, scriptObject, o, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+    JSScriptRelease(scriptObject);
+
+    script = JSStringCreateWithUTF8CString("eval(this);");
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, globalObject, NULL));
+    v = JSEvaluateScript(context, script, o, NULL, 1, NULL);
+    ASSERT(JSValueIsEqual(context, v, o, NULL));
+    JSStringRelease(script);
+
+    script = JSStringCreateWithUTF8CString("[ ]");
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsArray(context, v));
+    JSStringRelease(script);
+
+    script = JSStringCreateWithUTF8CString("new Date");
+    v = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+    ASSERT(JSValueIsDate(context, v));
+    JSStringRelease(script);
+
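+    // "rreturn Array;" is deliberately malformed so that evaluation throws; the exception's sourceURL property should match the URL passed to JSEvaluateScript.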
+    exception = NULL;
+    script = JSStringCreateWithUTF8CString("rreturn Array;");
+    JSStringRef sourceURL = JSStringCreateWithUTF8CString("file:///foo/bar.js");
+    JSStringRef sourceURLKey = JSStringCreateWithUTF8CString("sourceURL");
+    JSEvaluateScript(context, script, NULL, sourceURL, 1, &exception);
+    ASSERT(exception);
+    v = JSObjectGetProperty(context, JSValueToObject(context, exception, NULL), sourceURLKey, NULL);
+    assertEqualsAsUTF8String(v, "file:///foo/bar.js");
+    JSStringRelease(script);
+    JSStringRelease(sourceURL);
+    JSStringRelease(sourceURLKey);
+
+    // Verify that creating a constructor for a class with no static functions does not trigger
+    // an assert inside putDirect or lead to a crash during GC. 
+    nullDefinition = kJSClassDefinitionEmpty;
+    nullClass = JSClassCreate(&nullDefinition);
+    JSObjectMakeConstructor(context, nullClass, 0);
+    JSClassRelease(nullClass);
+
+    char* scriptUTF8 = createStringWithContentsOfFile(scriptPath);
+    if (!scriptUTF8) {
+        printf("FAIL: Test script could not be loaded.\n");
+        failed = 1;
+    } else {
+        JSStringRef url = JSStringCreateWithUTF8CString(scriptPath);
+        JSStringRef script = JSStringCreateWithUTF8CString(scriptUTF8);
+        JSStringRef errorMessage = 0;
+        int errorLine = 0;
+        JSScriptRef scriptObject = JSScriptCreateFromString(contextGroup, url, 1, script, &errorMessage, &errorLine);
+        ASSERT((!scriptObject) != (!errorMessage));
+        if (!scriptObject) {
+            printf("FAIL: Test script did not parse\n\t%s:%d\n\t", scriptPath, errorLine);
+            CFStringRef errorCF = JSStringCopyCFString(kCFAllocatorDefault, errorMessage);
+            CFShow(errorCF);
+            CFRelease(errorCF);
+            JSStringRelease(errorMessage);
+            failed = 1;
+        }
+
+        JSStringRelease(script);
+        exception = NULL;
+        result = scriptObject ? JSScriptEvaluate(context, scriptObject, 0, &exception) : 0;
+        if (result && JSValueIsUndefined(context, result))
+            printf("PASS: Test script executed successfully.\n");
+        else {
+            printf("FAIL: Test script returned unexpected value:\n");
+            JSStringRef exceptionIString = JSValueToStringCopy(context, exception, NULL);
+            CFStringRef exceptionCF = JSStringCopyCFString(kCFAllocatorDefault, exceptionIString);
+            CFShow(exceptionCF);
+            CFRelease(exceptionCF);
+            JSStringRelease(exceptionIString);
+            failed = 1;
+        }
+        JSScriptRelease(scriptObject);
+        free(scriptUTF8);
+    }
+
+    // Check that Promise is exposed.
+    {
+        JSObjectRef globalObject = JSContextGetGlobalObject(context);
+        {
+            JSStringRef promiseProperty = JSStringCreateWithUTF8CString("Promise");
+            ASSERT(JSObjectHasProperty(context, globalObject, promiseProperty));
+            JSStringRelease(promiseProperty);
+        }
+        {
+            JSStringRef script = JSStringCreateWithUTF8CString("typeof Promise");
+            JSStringRef function = JSStringCreateWithUTF8CString("function");
+            JSValueRef value = JSEvaluateScript(context, script, NULL, NULL, 1, NULL);
+            ASSERT(JSValueIsString(context, value));
+            JSStringRef valueAsString = JSValueToStringCopy(context, value, NULL);
+            ASSERT(JSStringIsEqual(valueAsString, function));
+            JSStringRelease(valueAsString);
+            JSStringRelease(function);
+            JSStringRelease(script);
+        }
+        printf("PASS: Promise is exposed under JSContext API.\n");
+    }
+
+    // Check microtasks: promise reactions queued during evaluation should be drained before JSEvaluateScript returns.
+    {
+        JSGlobalContextRef context = JSGlobalContextCreateInGroup(NULL, NULL);
+        {
+            JSObjectRef globalObject = JSContextGetGlobalObject(context);
+            JSValueRef exception;
+            JSStringRef code = JSStringCreateWithUTF8CString("result = 0; Promise.resolve(42).then(function (value) { result = value; });");
+            JSStringRef file = JSStringCreateWithUTF8CString("");
+            assertTrue(JSEvaluateScript(context, code, globalObject, file, 1, &exception), "An exception should not be thrown");
+            JSStringRelease(code);
+            JSStringRelease(file);
+
+            JSStringRef resultProperty = JSStringCreateWithUTF8CString("result");
+            ASSERT(JSObjectHasProperty(context, globalObject, resultProperty));
+
+            JSValueRef resultValue = JSObjectGetProperty(context, globalObject, resultProperty, &exception);
+            assertEqualsAsNumber(resultValue, 42);
+            JSStringRelease(resultProperty);
+        }
+        JSGlobalContextRelease(context);
+    }
+
+    failed = testTypedArrayCAPI() || failed;
+    failed = testExecutionTimeLimit() || failed;
+    failed = testFunctionOverrides() || failed;
+    failed = testGlobalContextWithFinalizer() || failed;
+    failed = testPingPongStackOverflow() || failed;
+    failed = testJSONParse() || failed;
+
+    // Clear out local variables pointing at JSObjectRefs to allow their values to be collected
+    function = NULL;
+    v = NULL;
+    o = NULL;
+    globalObject = NULL;
+    myConstructor = NULL;
+
+    JSStringRelease(jsEmptyIString);
+    JSStringRelease(jsOneIString);
+    JSStringRelease(jsCFIString);
+    JSStringRelease(jsCFEmptyIString);
+    JSStringRelease(jsCFIStringWithCharacters);
+    JSStringRelease(jsCFEmptyIStringWithCharacters);
+    JSStringRelease(goodSyntax);
+    JSStringRelease(badSyntax);
+
+    JSGlobalContextRelease(context);
+    JSClassRelease(globalObjectClass);
+
+    // Test for an infinite prototype chain that used to be created. This test
+    // passes if the call to JSObjectHasProperty() does not hang.
+
+    JSClassDefinition prototypeLoopClassDefinition = kJSClassDefinitionEmpty;
+    prototypeLoopClassDefinition.staticFunctions = globalObject_staticFunctions;
+    JSClassRef prototypeLoopClass = JSClassCreate(&prototypeLoopClassDefinition);
+    JSGlobalContextRef prototypeLoopContext = JSGlobalContextCreateInGroup(NULL, prototypeLoopClass);
+
+    JSStringRef nameProperty = JSStringCreateWithUTF8CString("name");
+    JSObjectHasProperty(prototypeLoopContext, JSContextGetGlobalObject(prototypeLoopContext), nameProperty);
+
+    JSGlobalContextRelease(prototypeLoopContext);
+    JSClassRelease(prototypeLoopClass);
+
+    printf("PASS: Infinite prototype chain does not occur.\n");
+
+    if (checkForCycleInPrototypeChain())
+        printf("PASS: A cycle in a prototype chain can't be created.\n");
+    else {
+        printf("FAIL: A cycle in a prototype chain can be created.\n");
+        failed = true;
+    }
+    if (valueToObjectExceptionTest())
+        printf("PASS: throwException did not crash when handling an error with appendMessageToError set and no codeBlock available.\n");
+
+    if (globalContextNameTest())
+        printf("PASS: global context name behaves as expected.\n");
+
+    customGlobalObjectClassTest();
+    globalObjectSetPrototypeTest();
+    globalObjectPrivatePropertyTest();
+
+    if (failed) {
+        printf("FAIL: Some tests failed.\n");
+        return 1;
+    }
+
+    printf("PASS: Program exited normally.\n");
+    return 0;
+}
+
+static char* createStringWithContentsOfFile(const char* fileName)
+{
+    char* buffer;
+    
+    size_t buffer_size = 0;
+    size_t buffer_capacity = 1024;
+    buffer = (char*)malloc(buffer_capacity);
+    
+    FILE* f = fopen(fileName, "r");
+    if (!f) {
+        fprintf(stderr, "Could not open file: %s\n", fileName);
+        free(buffer);
+        return 0;
+    }
+    
+    while (!feof(f) && !ferror(f)) {
+        buffer_size += fread(buffer + buffer_size, 1, buffer_capacity - buffer_size, f);
+        if (buffer_size == buffer_capacity) { // guarantees space for trailing '\0'
+            buffer_capacity *= 2;
+            buffer = (char*)realloc(buffer, buffer_capacity);
+            ASSERT(buffer);
+        }
+        
+        ASSERT(buffer_size < buffer_capacity);
+    }
+    fclose(f);
+    buffer[buffer_size] = '\0';
+    
+    return buffer;
+}
+
+#if OS(WINDOWS)
+extern "C" __declspec(dllexport) int WINAPI dllLauncherEntryPoint(int argc, const char* argv[])
+{
+    return main(argc, const_cast<char**>(argv));
+}
+#endif
diff --git a/API/tests/testapi.js b/API/tests/testapi.js
new file mode 100644
index 0000000..88d3701
--- /dev/null
+++ b/API/tests/testapi.js
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2006 Apple Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function bludgeonArguments() { if (0) arguments; return function g() {} }
+h = bludgeonArguments();
+gc();
+
+var failed = false;
+function pass(msg)
+{
+    print("PASS: " + msg, "green");
+}
+
+function fail(msg)
+{
+    print("FAIL: " + msg, "red");
+    failed = true;
+}
+
+function shouldBe(a, b)
+{
+    var evalA;
+    try {
+        evalA = eval(a);
+    } catch(e) {
+        evalA = e;
+    }
+    
+    if (evalA == b || isNaN(evalA) && typeof evalA == 'number' && isNaN(b) && typeof b == 'number')
+        pass(a + " should be " + b + " and is.");
+    else
+        fail(a + " should be " + b + " but instead is " + evalA + ".");
+}
+
+function shouldThrow(a)
+{
+    var evalA;
+    try {
+        eval(a);
+    } catch(e) {
+        pass(a + " threw: " + e);
+        return;
+    }
+
+    fail(a + " did not throw an exception.");
+}
+
+function globalStaticFunction()
+{
+    return 4;
+}
+
+shouldBe("globalStaticValue", 3);
+shouldBe("globalStaticFunction()", 4);
+shouldBe("this.globalStaticFunction()", 4);
+
+function globalStaticFunction2() {
+    return 10;
+}
+shouldBe("globalStaticFunction2();", 10);
+this.globalStaticFunction2 = function() { return 20; }
+shouldBe("globalStaticFunction2();", 20);
+shouldBe("this.globalStaticFunction2();", 20);
+
+function iAmNotAStaticFunction() { return 10; }
+shouldBe("iAmNotAStaticFunction();", 10);
+this.iAmNotAStaticFunction = function() { return 20; }
+shouldBe("iAmNotAStaticFunction();", 20);
+
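+// MyObject and friends are host objects installed by the native side of this test; the assertions below exercise their custom get/set/delete/call/construct callbacks.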
+shouldBe("typeof MyObject", "function"); // our object implements 'call'
+MyObject.cantFind = 1;
+shouldBe("MyObject.cantFind", undefined);
+MyObject.regularType = 1;
+shouldBe("MyObject.regularType", 1);
+MyObject.alwaysOne = 2;
+shouldBe("MyObject.alwaysOne", 1);
+MyObject.cantDelete = 1;
+delete MyObject.cantDelete;
+shouldBe("MyObject.cantDelete", 1);
+shouldBe("delete MyObject.throwOnDelete", "an exception");
+MyObject.cantSet = 1;
+shouldBe("MyObject.cantSet", undefined);
+shouldBe("MyObject.throwOnGet", "an exception");
+shouldBe("MyObject.throwOnSet = 5", "an exception");
+shouldBe("MyObject('throwOnCall')", "an exception");
+shouldBe("new MyObject('throwOnConstruct')", "an exception");
+shouldBe("'throwOnHasInstance' instanceof MyObject", "an exception");
+
+MyObject.nullGetForwardSet = 1;
+shouldBe("MyObject.nullGetForwardSet", 1);
+
+var foundMyPropertyName = false;
+var foundRegularType = false;
+for (var p in MyObject) {
+    if (p == "myPropertyName")
+        foundMyPropertyName = true;
+    if (p == "regularType")
+        foundRegularType = true;
+}
+
+if (foundMyPropertyName)
+    pass("MyObject.myPropertyName was enumerated");
+else
+    fail("MyObject.myPropertyName was not enumerated");
+
+if (foundRegularType)
+    pass("MyObject.regularType was enumerated");
+else
+    fail("MyObject.regularType was not enumerated");
+
+var alwaysOneDescriptor = Object.getOwnPropertyDescriptor(MyObject, "alwaysOne");
+shouldBe('typeof alwaysOneDescriptor', "object");
+shouldBe('alwaysOneDescriptor.value', MyObject.alwaysOne);
+shouldBe('alwaysOneDescriptor.configurable', true);
+shouldBe('alwaysOneDescriptor.enumerable', false); // Actually it is.
+var cantFindDescriptor = Object.getOwnPropertyDescriptor(MyObject, "cantFind");
+shouldBe('typeof cantFindDescriptor', "object");
+shouldBe('cantFindDescriptor.value', MyObject.cantFind);
+shouldBe('cantFindDescriptor.configurable', true);
+shouldBe('cantFindDescriptor.enumerable', false);
+try {
+    // If getOwnPropertyDescriptor() returned an accessor descriptor, this wouldn't throw.
+    Object.getOwnPropertyDescriptor(MyObject, "throwOnGet");
+} catch (e) {
+    pass("getting property descriptor of throwOnGet threw exception");
+}
+var myPropertyNameDescriptor = Object.getOwnPropertyDescriptor(MyObject, "myPropertyName");
+shouldBe('typeof myPropertyNameDescriptor', "object");
+shouldBe('myPropertyNameDescriptor.value', MyObject.myPropertyName);
+shouldBe('myPropertyNameDescriptor.configurable', true);
+shouldBe('myPropertyNameDescriptor.enumerable', false); // Actually it is.
+try {
+    // If getOwnPropertyDescriptor() returned an accessor descriptor, this wouldn't throw.
+    Object.getOwnPropertyDescriptor(MyObject, "hasPropertyLie");
+} catch (e) {
+    pass("getting property descriptor of hasPropertyLie threw exception");
+}
+shouldBe('Object.getOwnPropertyDescriptor(MyObject, "doesNotExist")', undefined);
+
+myObject = new MyObject();
+
+shouldBe("delete MyObject.regularType", true);
+shouldBe("MyObject.regularType", undefined);
+shouldBe("MyObject(0)", 1);
+shouldBe("MyObject()", undefined);
+shouldBe("typeof myObject", "object");
+shouldBe("MyObject ? 1 : 0", true); // toBoolean
+shouldBe("+MyObject", 1); // toNumber
+shouldBe("(Object.prototype.toString.call(MyObject))", "[object MyObject]"); // Object.prototype.toString
+shouldBe("(MyObject.toString())", "[object MyObject]"); // toString
+shouldBe("String(MyObject)", "MyObjectAsString"); // toString
+shouldBe("MyObject - 0", 1); // toNumber
+shouldBe("MyObject.valueOf()", 1); // valueOf
+
+shouldBe("typeof MyConstructor", "object");
+constructedObject = new MyConstructor(1);
+shouldBe("typeof constructedObject", "object");
+shouldBe("constructedObject.value", 1);
+shouldBe("myObject instanceof MyObject", true);
+shouldBe("(new Object()) instanceof MyObject", false);
+
+shouldThrow("new MyBadConstructor()");
+
+MyObject.nullGetSet = 1;
+shouldBe("MyObject.nullGetSet", 1);
+shouldThrow("MyObject.nullCall()");
+shouldThrow("MyObject.hasPropertyLie");
+
+derived = new Derived();
+
+shouldBe("derived instanceof Derived", true);
+shouldBe("derived instanceof Base", true);
+
+// base properties and functions return 1 when called/gotten; derived, 2
+shouldBe("derived.baseProtoDup()", 2);
+shouldBe("derived.baseProto()", 1);
+shouldBe("derived.baseDup", 2);
+shouldBe("derived.baseOnly", 1);
+shouldBe("derived.protoOnly()", 2);
+shouldBe("derived.protoDup", 2);
+shouldBe("derived.derivedOnly", 2)
+
+shouldBe("derived.baseHardNull()", undefined)
+
+// base properties throw 1 when set; derived, 2
+shouldBe("derived.baseDup = 0", 2);
+shouldBe("derived.baseOnly = 0", 1);
+shouldBe("derived.derivedOnly = 0", 2)
+shouldBe("derived.protoDup = 0", 2);
+
+derived2 = new Derived2();
+
+shouldBe("derived2 instanceof Derived2", true);
+shouldBe("derived2 instanceof Derived", true);
+shouldBe("derived2 instanceof Base", true);
+
+// base properties and functions return 1 when called/gotten; derived, 2
+shouldBe("derived2.baseProtoDup()", 2);
+shouldBe("derived2.baseProto()", 1);
+shouldBe("derived2.baseDup", 2);
+shouldBe("derived2.baseOnly", 1);
+shouldBe("derived2.protoOnly()", 2);
+shouldBe("derived2.protoDup", 2);
+shouldBe("derived2.derivedOnly", 2)
+
+// base properties throw 1 when set; derived, 2
+shouldBe("derived2.baseDup = 0", 2);
+shouldBe("derived2.baseOnly = 0", 1);
+shouldBe("derived2.derivedOnly = 0", 2)
+shouldBe("derived2.protoDup = 0", 2);
+
+shouldBe('Object.getOwnPropertyDescriptor(derived, "baseProto")', undefined);
+shouldBe('Object.getOwnPropertyDescriptor(derived, "baseProtoDup")', undefined);
+var baseDupDescriptor = Object.getOwnPropertyDescriptor(derived, "baseDup");
+shouldBe('typeof baseDupDescriptor', "object");
+shouldBe('baseDupDescriptor.value', derived.baseDup);
+shouldBe('baseDupDescriptor.configurable', true);
+shouldBe('baseDupDescriptor.enumerable', false);
+var baseOnlyDescriptor = Object.getOwnPropertyDescriptor(derived, "baseOnly");
+shouldBe('typeof baseOnlyDescriptor', "object");
+shouldBe('baseOnlyDescriptor.value', derived.baseOnly);
+shouldBe('baseOnlyDescriptor.configurable', true);
+shouldBe('baseOnlyDescriptor.enumerable', false);
+shouldBe('Object.getOwnPropertyDescriptor(derived, "protoOnly")', undefined);
+var protoDupDescriptor = Object.getOwnPropertyDescriptor(derived, "protoDup");
+shouldBe('typeof protoDupDescriptor', "object");
+shouldBe('protoDupDescriptor.value', derived.protoDup);
+shouldBe('protoDupDescriptor.configurable', true);
+shouldBe('protoDupDescriptor.enumerable', false);
+var derivedOnlyDescriptor = Object.getOwnPropertyDescriptor(derived, "derivedOnly");
+shouldBe('typeof derivedOnlyDescriptor', "object");
+shouldBe('derivedOnlyDescriptor.value', derived.derivedOnly);
+shouldBe('derivedOnlyDescriptor.configurable', true);
+shouldBe('derivedOnlyDescriptor.enumerable', false);
+
+shouldBe("undefined instanceof MyObject", false);
+EvilExceptionObject.hasInstance = function f() { return f(); };
+EvilExceptionObject.__proto__ = undefined;
+shouldThrow("undefined instanceof EvilExceptionObject");
+EvilExceptionObject.hasInstance = function () { return true; };
+shouldBe("undefined instanceof EvilExceptionObject", true);
+
+EvilExceptionObject.toNumber = function f() { return f(); }
+shouldThrow("EvilExceptionObject*5");
+EvilExceptionObject.toStringExplicit = function f() { return f(); }
+shouldThrow("String(EvilExceptionObject)");
+
+shouldBe("console", "[object Console]");
+shouldBe("typeof console.log", "function");
+
+shouldBe("EmptyObject", "[object CallbackObject]");
+
+for (var i = 0; i < 6; ++i)
+    PropertyCatchalls.x = i;
+shouldBe("PropertyCatchalls.x", 4);
+
+for (var i = 0; i < 6; ++i)
+    var x = PropertyCatchalls.x;
+shouldBe("x", null);
+var make_throw = 'make_throw';
+shouldThrow("PropertyCatchalls[make_throw]=1");
+make_throw = 0;
+shouldThrow("PropertyCatchalls[make_throw]=1");
+
+for (var i = 0; i < 10; ++i) {
+    for (var p in PropertyCatchalls) {
+        if (p == "x")
+            continue;
+        shouldBe("p", i % 10);
+        break;
+    }
+}
+
+PropertyCatchalls.__proto__ = { y: 1 };
+for (var i = 0; i < 6; ++i)
+    var y = PropertyCatchalls.y;
+shouldBe("y", null);
+
+var o = { __proto__: PropertyCatchalls };
+for (var i = 0; i < 6; ++i)
+    var z = PropertyCatchalls.z;
+shouldBe("z", null);
+
+if (failed)
+    throw "Some tests failed";
diff --git a/API/tests/testapi.mm b/API/tests/testapi.mm
new file mode 100644
index 0000000..bb69ad0
--- /dev/null
+++ b/API/tests/testapi.mm
@@ -0,0 +1,1523 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import <JavaScriptCore/JavaScriptCore.h>
+
+#import "CurrentThisInsideBlockGetterTest.h"
+#import "DateTests.h"
+#import "JSExportTests.h"
+#import "Regress141275.h"
+#import "Regress141809.h"
+
+#import 
+#import 
+
+extern "C" void JSSynchronousGarbageCollectForDebugging(JSContextRef);
+extern "C" void JSSynchronousEdenCollectForDebugging(JSContextRef);
+
+extern "C" bool _Block_has_signature(id);
+extern "C" const char * _Block_signature(id);
+
+extern int failed;
+extern "C" void testObjectiveCAPI(void);
+extern "C" void checkResult(NSString *, bool);
+
+#if JSC_OBJC_API_ENABLED
+
+@interface UnexportedObject : NSObject
+@end
+
+@implementation UnexportedObject
+@end
+
+@protocol ParentObject <JSExport>
+@end
+
+@interface ParentObject : NSObject <ParentObject>
++ (NSString *)parentTest;
+@end
+
+@implementation ParentObject
++ (NSString *)parentTest
+{
+    return [self description];
+}
+@end
+
+@protocol TestObject <JSExport>
+- (id)init;
+@property int variable;
+@property (readonly) int six;
+@property CGPoint point;
++ (NSString *)classTest;
++ (NSString *)parentTest;
+- (NSString *)getString;
+JSExportAs(testArgumentTypes,
+- (NSString *)testArgumentTypesWithInt:(int)i double:(double)d boolean:(BOOL)b string:(NSString *)s number:(NSNumber *)n array:(NSArray *)a dictionary:(NSDictionary *)o
+);
+- (void)callback:(JSValue *)function;
+- (void)bogusCallback:(void(^)(int))function;
+@end
+
+@interface TestObject : ParentObject <TestObject>
+@property int six;
++ (id)testObject;
+@end
+
+@implementation TestObject
+@synthesize variable;
+@synthesize six;
+@synthesize point;
++ (id)testObject
+{
+    return [[TestObject alloc] init];
+}
++ (NSString *)classTest
+{
+    return @"classTest - okay";
+}
+- (NSString *)getString
+{
+    return @"42";
+}
+- (NSString *)testArgumentTypesWithInt:(int)i double:(double)d boolean:(BOOL)b string:(NSString *)s number:(NSNumber *)n array:(NSArray *)a dictionary:(NSDictionary *)o
+{
+    return [NSString stringWithFormat:@"%d,%g,%d,%@,%d,%@,%@", i, d, b==YES?true:false,s,[n intValue],a[1],o[@"x"]];
+}
+- (void)callback:(JSValue *)function
+{
+    [function callWithArguments:[NSArray arrayWithObject:[NSNumber numberWithInt:42]]];
+}
+- (void)bogusCallback:(void(^)(int))function
+{
+    function(42);
+}
+@end
+
+bool testXYZTested = false;
+
+@protocol TextXYZ <JSExport>
+- (id)initWithString:(NSString*)string;
+@property int x;
+@property (readonly) int y;
+@property (assign) JSValue *onclick;
+@property (assign) JSValue *weakOnclick;
+- (void)test:(NSString *)message;
+@end
+
+@interface TextXYZ : NSObject <TextXYZ>
+@property int x;
+@property int y;
+@property int z;
+- (void)click;
+@end
+
+@implementation TextXYZ {
+    JSManagedValue *m_weakOnclickHandler;
+    JSManagedValue *m_onclickHandler;
+}
+@synthesize x;
+@synthesize y;
+@synthesize z;
+- (id)initWithString:(NSString*)string
+{
+    self = [super init];
+    if (!self)
+        return nil;
+
+    NSLog(@"%@", string);
+
+    return self;
+}
+- (void)test:(NSString *)message
+{
+    testXYZTested = [message isEqual:@"test"] && x == 13 && y == 4 && z == 5;
+}
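+// Note: setWeakOnclick deliberately does not call addManagedReference:withOwner:, so its handler may be collected; setOnclick registers the managed value with the virtual machine so the handler survives while this object is referenced.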
+- (void)setWeakOnclick:(JSValue *)value
+{
+    m_weakOnclickHandler = [JSManagedValue managedValueWithValue:value];
+}
+
+- (void)setOnclick:(JSValue *)value
+{
+    m_onclickHandler = [JSManagedValue managedValueWithValue:value];
+    [value.context.virtualMachine addManagedReference:m_onclickHandler withOwner:self];
+}
+- (JSValue *)weakOnclick
+{
+    return [m_weakOnclickHandler value];
+}
+- (JSValue *)onclick
+{
+    return [m_onclickHandler value];
+}
+- (void)click
+{
+    if (!m_onclickHandler)
+        return;
+
+    JSValue *function = [m_onclickHandler value];
+    [function callWithArguments:[NSArray array]];
+}
+@end
+
+@class TinyDOMNode;
+
+@protocol TinyDOMNode <JSExport>
+- (void)appendChild:(TinyDOMNode *)child;
+- (NSUInteger)numberOfChildren;
+- (TinyDOMNode *)childAtIndex:(NSUInteger)index;
+- (void)removeChildAtIndex:(NSUInteger)index;
+@end
+
+@interface TinyDOMNode : NSObject <TinyDOMNode>
+@end
+
+@implementation TinyDOMNode {
+    NSMutableArray *m_children;
+    JSVirtualMachine *m_sharedVirtualMachine;
+}
+
+- (id)initWithVirtualMachine:(JSVirtualMachine *)virtualMachine
+{
+    self = [super init];
+    if (!self)
+        return nil;
+
+    m_children = [[NSMutableArray alloc] initWithCapacity:0];
+    m_sharedVirtualMachine = virtualMachine;
+#if !__has_feature(objc_arc)
+    [m_sharedVirtualMachine retain];
+#endif
+
+    return self;
+}
+
+- (void)appendChild:(TinyDOMNode *)child
+{
+    [m_sharedVirtualMachine addManagedReference:child withOwner:self];
+    [m_children addObject:child];
+}
+
+- (NSUInteger)numberOfChildren
+{
+    return [m_children count];
+}
+
+- (TinyDOMNode *)childAtIndex:(NSUInteger)index
+{
+    if (index >= [m_children count])
+        return nil;
+    return [m_children objectAtIndex:index];
+}
+
+- (void)removeChildAtIndex:(NSUInteger)index
+{
+    if (index >= [m_children count])
+        return;
+    [m_sharedVirtualMachine removeManagedReference:[m_children objectAtIndex:index] withOwner:self];
+    [m_children removeObjectAtIndex:index];
+}
+
+@end
+
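+// JSCollection stores JSValues as JSManagedValues, adding and removing managed references (owned by the collection) on the value's virtual machine rather than retaining the values directly.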
+@interface JSCollection : NSObject
+- (void)setValue:(JSValue *)value forKey:(NSString *)key;
+- (JSValue *)valueForKey:(NSString *)key;
+@end
+
+@implementation JSCollection {
+    NSMutableDictionary *_dict;
+}
+- (id)init
+{
+    self = [super init];
+    if (!self)
+        return nil;
+
+    _dict = [[NSMutableDictionary alloc] init];
+
+    return self;
+}
+
+- (void)setValue:(JSValue *)value forKey:(NSString *)key
+{
+    JSManagedValue *oldManagedValue = [_dict objectForKey:key];
+    if (oldManagedValue) {
+        JSValue* oldValue = [oldManagedValue value];
+        if (oldValue)
+            [oldValue.context.virtualMachine removeManagedReference:oldManagedValue withOwner:self];
+    }
+    JSManagedValue *managedValue = [JSManagedValue managedValueWithValue:value];
+    [value.context.virtualMachine addManagedReference:managedValue withOwner:self];
+    [_dict setObject:managedValue forKey:key];
+}
+
+- (JSValue *)valueForKey:(NSString *)key
+{
+    JSManagedValue *managedValue = [_dict objectForKey:key];
+    if (!managedValue)
+        return nil;
+    return [managedValue value];
+}
+@end
+
+@protocol InitA <JSExport>
+- (id)initWithA:(int)a;
+- (int)initialize;
+@end
+
+@protocol InitB <JSExport>
+- (id)initWithA:(int)a b:(int)b;
+@end
+
+@protocol InitC <JSExport>
+- (id)_init;
+@end
+
+@interface ClassA : NSObject
+@end
+
+@interface ClassB : ClassA
+@end
+
+@interface ClassC : ClassB
+@end
+
+@interface ClassCPrime : ClassB
+@end
+
+@interface ClassD : NSObject
+- (id)initWithA:(int)a;
+@end
+
+@interface ClassE : ClassD
+- (id)initWithA:(int)a;
+@end
+
+@implementation ClassA {
+    int _a;
+}
+- (id)initWithA:(int)a
+{
+    self = [super init];
+    if (!self)
+        return nil;
+
+    _a = a;
+
+    return self;
+}
+- (int)initialize
+{
+    return 42;
+}
+@end
+
+@implementation ClassB {
+    int _b;
+}
+- (id)initWithA:(int)a b:(int)b
+{
+    self = [super initWithA:a];
+    if (!self)
+        return nil;
+
+    _b = b;
+
+    return self;
+}
+@end
+
+@implementation ClassC {
+    int _c;
+}
+- (id)initWithA:(int)a
+{
+    return [self initWithA:a b:0];
+}
+- (id)initWithA:(int)a b:(int)b
+{
+    self = [super initWithA:a b:b];
+    if (!self)
+        return nil;
+
+    _c = a + b;
+
+    return self;
+}
+@end
+
+@implementation ClassCPrime
+- (id)initWithA:(int)a
+{
+    self = [super initWithA:a b:0];
+    if (!self)
+        return nil;
+    return self;
+}
+- (id)_init
+{
+    return [self initWithA:42];
+}
+@end
+
+@implementation ClassD
+
+- (id)initWithA:(int)a
+{
+    self = nil;
+    return [[ClassE alloc] initWithA:a];
+}
+- (int)initialize
+{
+    return 0;
+}
+@end
+
+@implementation ClassE {
+    int _a;
+}
+
+- (id)initWithA:(int)a
+{
+    self = [super init];
+    if (!self)
+        return nil;
+
+    _a = a;
+
+    return self;
+}
+@end
+
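+// EvilAllocationObject re-enters the VM from -dealloc: it runs a script that allocates heavily and then forces a synchronous GC, to check that doing so during deallocation does not crash.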
+static bool evilAllocationObjectWasDealloced = false;
+
+@interface EvilAllocationObject : NSObject
+- (JSValue *)doEvilThingsWithContext:(JSContext *)context;
+@end
+
+@implementation EvilAllocationObject {
+    JSContext *m_context;
+}
+- (id)initWithContext:(JSContext *)context
+{
+    self = [super init];
+    if (!self)
+        return nil;
+
+    m_context = context;
+
+    return self;
+}
+- (void)dealloc
+{
+    [self doEvilThingsWithContext:m_context];
+    evilAllocationObjectWasDealloced = true;
+#if !__has_feature(objc_arc)
+    [super dealloc];
+#endif
+}
+
+- (JSValue *)doEvilThingsWithContext:(JSContext *)context
+{
+    JSValue *result = [context evaluateScript:@" \
+        (function() { \
+            var a = []; \
+            var sum = 0; \
+            for (var i = 0; i < 10000; ++i) { \
+                sum += i; \
+                a[i] = sum; \
+            } \
+            return sum; \
+        })()"];
+
+    JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+    return result;
+}
+@end
+
+extern "C" void checkResult(NSString *description, bool passed)
+{
+    NSLog(@"TEST: \"%@\": %@", description, passed ? @"PASSED" : @"FAILED");
+    if (!passed)
+        failed = 1;
+}
+
+static bool blockSignatureContainsClass()
+{
+    static bool containsClass = ^{
+        id block = ^(NSString *string){ return string; };
+        return _Block_has_signature(block) && strstr(_Block_signature(block), "NSString");
+    }();
+    return containsClass;
+}
+
+static void* threadMain(void* contextPtr)
+{
+    JSContext *context = (__bridge JSContext*)contextPtr;
+
+    // Do something to enter the VM.
+    TestObject *testObject = [TestObject testObject];
+    context[@"testObject"] = testObject;
+    return nullptr;
+}
+
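+// Runs on a secondary thread with its own JSVirtualMachine: builds a large object graph, checks it is reachable, and forces a GC.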
+static void* multiVMThreadMain(void* okPtr)
+{
+    bool& ok = *static_cast<bool*>(okPtr);
+    JSVirtualMachine *vm = [[JSVirtualMachine alloc] init];
+    JSContext* context = [[JSContext alloc] initWithVirtualMachine:vm];
+    [context evaluateScript:
+        @"var array = [{}];\n"
+         "for (var i = 0; i < 20; ++i) {\n"
+         "    var newArray = new Array(array.length * 2);\n"
+         "    for (var j = 0; j < newArray.length; ++j)\n"
+         "        newArray[j] = {parent: array[j / 2]};\n"
+         "    array = newArray;\n"
+         "}\n"];
+    if (context.exception) {
+        NSLog(@"Uncaught exception.\n");
+        ok = false;
+    }
+    if (![context.globalObject valueForProperty:@"array"].toObject) {
+        NSLog(@"Did not find \"array\" variable.\n");
+        ok = false;
+    }
+    JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+    return nullptr;
+}
+
+static void testObjectiveCAPIMain()
+{
+    @autoreleasepool {
+        JSVirtualMachine* vm = [[JSVirtualMachine alloc] init];
+        JSContext* context = [[JSContext alloc] initWithVirtualMachine:vm];
+        [context evaluateScript:@"bad"];
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *result = [context evaluateScript:@"2 + 2"];
+        checkResult(@"2 + 2", result.isNumber && [result toInt32] == 4);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        NSString *result = [NSString stringWithFormat:@"Two plus two is %@", [context evaluateScript:@"2 + 2"]];
+        checkResult(@"stringWithFormat", [result isEqual:@"Two plus two is 4"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"message"] = @"Hello";
+        JSValue *result = [context evaluateScript:@"message + ', World!'"];
+        checkResult(@"Hello, World!", result.isString && [result isEqualToObject:@"Hello, World!"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        checkResult(@"Promise is exposed", ![context[@"Promise"] isUndefined]);
+        JSValue *result = [context evaluateScript:@"typeof Promise"];
+        checkResult(@"typeof Promise is 'function'", result.isString && [result isEqualToObject:@"function"]);
+    }
+
+    @autoreleasepool {
+        JSVirtualMachine* vm = [[JSVirtualMachine alloc] init];
+        JSContext* context = [[JSContext alloc] initWithVirtualMachine:vm];
+        [context evaluateScript:@"result = 0; Promise.resolve(42).then(function (value) { result = value; });"];
+        checkResult(@"Microtask is drained", [context[@"result"]  isEqualToObject:@42]);
+    }
+
+    @autoreleasepool {
+        JSVirtualMachine* vm = [[JSVirtualMachine alloc] init];
+        JSContext* context = [[JSContext alloc] initWithVirtualMachine:vm];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        [context evaluateScript:@"result = 0; callbackResult = 0; Promise.resolve(42).then(function (value) { result = value; }); callbackResult = testObject.getString();"];
+        checkResult(@"Microtask is drained with same VM", [context[@"result"]  isEqualToObject:@42] && [context[@"callbackResult"] isEqualToObject:@"42"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *result = [context evaluateScript:@"({ x:42 })"];
+        checkResult(@"({ x:42 })", result.isObject && [result[@"x"] isEqualToObject:@42]);
+        id obj = [result toObject];
+        checkResult(@"Check dictionary literal", [obj isKindOfClass:[NSDictionary class]]);
+        id num = (NSDictionary *)obj[@"x"];
+        checkResult(@"Check numeric literal", [num isKindOfClass:[NSNumber class]]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *result = [context evaluateScript:@"[ ]"];
+        checkResult(@"[ ]", result.isArray);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *result = [context evaluateScript:@"new Date"];
+        checkResult(@"new Date", result.isDate);
+    }
+
+    @autoreleasepool {
+        JSCollection* myPrivateProperties = [[JSCollection alloc] init];
+
+        @autoreleasepool {
+            JSContext* context = [[JSContext alloc] init];
+            TestObject* rootObject = [TestObject testObject];
+            context[@"root"] = rootObject;
+            [context.virtualMachine addManagedReference:myPrivateProperties withOwner:rootObject];
+            [myPrivateProperties setValue:[JSValue valueWithBool:true inContext:context] forKey:@"is_ham"];
+            [myPrivateProperties setValue:[JSValue valueWithObject:@"hello!" inContext:context] forKey:@"message"];
+            [myPrivateProperties setValue:[JSValue valueWithInt32:42 inContext:context] forKey:@"my_number"];
+            [myPrivateProperties setValue:[JSValue valueWithNullInContext:context] forKey:@"definitely_null"];
+            [myPrivateProperties setValue:[JSValue valueWithUndefinedInContext:context] forKey:@"not_sure_if_undefined"];
+
+            JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+            JSValue *isHam = [myPrivateProperties valueForKey:@"is_ham"];
+            JSValue *message = [myPrivateProperties valueForKey:@"message"];
+            JSValue *myNumber = [myPrivateProperties valueForKey:@"my_number"];
+            JSValue *definitelyNull = [myPrivateProperties valueForKey:@"definitely_null"];
+            JSValue *notSureIfUndefined = [myPrivateProperties valueForKey:@"not_sure_if_undefined"];
+            checkResult(@"is_ham is true", isHam.isBoolean && [isHam toBool]);
+            checkResult(@"message is hello!", message.isString && [@"hello!" isEqualToString:[message toString]]);
+            checkResult(@"my_number is 42", myNumber.isNumber && [myNumber toInt32] == 42);
+            checkResult(@"definitely_null is null", definitelyNull.isNull);
+            checkResult(@"not_sure_if_undefined is undefined", notSureIfUndefined.isUndefined);
+        }
+
+        checkResult(@"is_ham is nil", ![myPrivateProperties valueForKey:@"is_ham"]);
+        checkResult(@"message is nil", ![myPrivateProperties valueForKey:@"message"]);
+        checkResult(@"my_number is 42", ![myPrivateProperties valueForKey:@"my_number"]);
+        checkResult(@"definitely_null is null", ![myPrivateProperties valueForKey:@"definitely_null"]);
+        checkResult(@"not_sure_if_undefined is undefined", ![myPrivateProperties valueForKey:@"not_sure_if_undefined"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *message = [JSValue valueWithObject:@"hello" inContext:context];
+        TestObject *rootObject = [TestObject testObject];
+        JSCollection *collection = [[JSCollection alloc] init];
+        context[@"root"] = rootObject;
+        @autoreleasepool {
+            JSValue *jsCollection = [JSValue valueWithObject:collection inContext:context];
+            JSManagedValue *weakCollection = [JSManagedValue managedValueWithValue:jsCollection andOwner:rootObject];
+            [context.virtualMachine addManagedReference:weakCollection withOwner:message];
+            JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+        }
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        __block int result;
+        context[@"blockCallback"] = ^(int value){
+            result = value;
+        };
+        [context evaluateScript:@"blockCallback(42)"];
+        checkResult(@"blockCallback", result == 42);
+    }
+
+    if (blockSignatureContainsClass()) {
+        @autoreleasepool {
+            JSContext *context = [[JSContext alloc] init];
+            __block bool result = false;
+            context[@"blockCallback"] = ^(NSString *value){
+                result = [@"42" isEqualToString:value] == YES;
+            };
+            [context evaluateScript:@"blockCallback(42)"];
+            checkResult(@"blockCallback(NSString *)", result);
+        }
+    } else
+        NSLog(@"Skipping 'blockCallback(NSString *)' test case");
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        checkResult(@"!context.exception", !context.exception);
+        [context evaluateScript:@"!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()"];
+        checkResult(@"context.exception", context.exception);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        __block bool caught = false;
+        context.exceptionHandler = ^(JSContext *context, JSValue *exception) {
+            (void)context;
+            (void)exception;
+            caught = true;
+        };
+        [context evaluateScript:@"!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()"];
+        checkResult(@"JSContext.exceptionHandler", caught);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        __block int expectedExceptionLineNumber = 1;
+        __block bool sawExpectedExceptionLineNumber = false;
+        context.exceptionHandler = ^(JSContext *, JSValue *exception) {
+            sawExpectedExceptionLineNumber = [exception[@"line"] toInt32] == expectedExceptionLineNumber;
+        };
+        [context evaluateScript:@"!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()"];
+        checkResult(@"evaluteScript exception on line 1", sawExpectedExceptionLineNumber);
+
+        expectedExceptionLineNumber = 2;
+        sawExpectedExceptionLineNumber = false;
+        [context evaluateScript:@"// Line 1\n!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()"];
+        checkResult(@"evaluteScript exception on line 2", sawExpectedExceptionLineNumber);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        __block bool emptyExceptionSourceURL = false;
+        context.exceptionHandler = ^(JSContext *, JSValue *exception) {
+            emptyExceptionSourceURL = exception[@"sourceURL"].isUndefined;
+        };
+        [context evaluateScript:@"!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()"];
+        checkResult(@"evaluteScript: exception has no sourceURL", emptyExceptionSourceURL);
+
+        __block NSString *exceptionSourceURL = nil;
+        context.exceptionHandler = ^(JSContext *, JSValue *exception) {
+            exceptionSourceURL = [exception[@"sourceURL"] toString];
+        };
+        NSURL *url = [NSURL fileURLWithPath:@"/foo/bar.js"];
+        [context evaluateScript:@"!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()" withSourceURL:url];
+        checkResult(@"evaluateScript:withSourceURL: exception has expected sourceURL", [exceptionSourceURL isEqualToString:[url absoluteString]]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"callback"] = ^{
+            JSContext *context = [JSContext currentContext];
+            context.exception = [JSValue valueWithNewErrorFromMessage:@"Something went wrong." inContext:context];
+        };
+        JSValue *result = [context evaluateScript:@"var result; try { callback(); } catch (e) { result = 'Caught exception'; }"];
+        checkResult(@"Explicit throw in callback - was caught by JavaScript", [result isEqualToObject:@"Caught exception"]);
+        checkResult(@"Explicit throw in callback - not thrown to Objective-C", !context.exception);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"callback"] = ^{
+            JSContext *context = [JSContext currentContext];
+            [context evaluateScript:@"!@#$%^&*() THIS IS NOT VALID JAVASCRIPT SYNTAX !@#$%^&*()"];
+        };
+        JSValue *result = [context evaluateScript:@"var result; try { callback(); } catch (e) { result = 'Caught exception'; }"];
+        checkResult(@"Implicit throw in callback - was caught by JavaScript", [result isEqualToObject:@"Caught exception"]);
+        checkResult(@"Implicit throw in callback - not thrown to Objective-C", !context.exception);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        [context evaluateScript:
+            @"function sum(array) { \
+                var result = 0; \
+                for (var i in array) \
+                    result += array[i]; \
+                return result; \
+            }"];
+        JSValue *array = [JSValue valueWithObject:@[@13, @2, @7] inContext:context];
+        JSValue *sumFunction = context[@"sum"];
+        JSValue *result = [sumFunction callWithArguments:@[ array ]];
+        checkResult(@"sum([13, 2, 7])", [result toInt32] == 22);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *mulAddFunction = [context evaluateScript:
+            @"(function(array, object) { \
+                var result = []; \
+                for (var i in array) \
+                    result.push(array[i] * object.x + object.y); \
+                return result; \
+            })"];
+        JSValue *result = [mulAddFunction callWithArguments:@[ @[ @2, @4, @8 ], @{ @"x":@0.5, @"y":@42 } ]];
+        checkResult(@"mulAddFunction", result.isObject && [[result toString] isEqual:@"43,44,46"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];        
+        JSValue *array = [JSValue valueWithNewArrayInContext:context];
+        checkResult(@"arrayLengthEmpty", [[array[@"length"] toNumber] unsignedIntegerValue] == 0);
+        JSValue *value1 = [JSValue valueWithInt32:42 inContext:context];
+        JSValue *value2 = [JSValue valueWithInt32:24 inContext:context];
+        NSUInteger lowIndex = 5;
+        NSUInteger maxLength = UINT_MAX;
+
+        [array setValue:value1 atIndex:lowIndex];
+        checkResult(@"array.length after put to low index", [[array[@"length"] toNumber] unsignedIntegerValue] == (lowIndex + 1));
+
+        [array setValue:value1 atIndex:(maxLength - 1)];
+        checkResult(@"array.length after put to maxLength - 1", [[array[@"length"] toNumber] unsignedIntegerValue] == maxLength);
+
+        [array setValue:value2 atIndex:maxLength];
+        checkResult(@"array.length after put to maxLength", [[array[@"length"] toNumber] unsignedIntegerValue] == maxLength);
+
+        [array setValue:value2 atIndex:(maxLength + 1)];
+        checkResult(@"array.length after put to maxLength + 1", [[array[@"length"] toNumber] unsignedIntegerValue] == maxLength);
+
+        if (sizeof(NSUInteger) == 8)
+            checkResult(@"valueAtIndex:0 is undefined", [array valueAtIndex:0].isUndefined);
+        else
+            checkResult(@"valueAtIndex:0", [[array valueAtIndex:0] toInt32] == 24);
+        checkResult(@"valueAtIndex:lowIndex", [[array valueAtIndex:lowIndex] toInt32] == 42);
+        checkResult(@"valueAtIndex:maxLength - 1", [[array valueAtIndex:(maxLength - 1)] toInt32] == 42);
+        checkResult(@"valueAtIndex:maxLength", [[array valueAtIndex:maxLength] toInt32] == 24);
+        checkResult(@"valueAtIndex:maxLength + 1", [[array valueAtIndex:(maxLength + 1)] toInt32] == 24);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *object = [JSValue valueWithNewObjectInContext:context];
+
+        object[@"point"] = @{ @"x":@1, @"y":@2 };
+        object[@"point"][@"x"] = @3;
+        CGPoint point = [object[@"point"] toPoint];
+        checkResult(@"toPoint", point.x == 3 && point.y == 2);
+
+        object[@{ @"toString":^{ return @"foo"; } }] = @"bar";
+        checkResult(@"toString in object literal used as subscript", [[object[@"foo"] toString] isEqual:@"bar"]);
+
+        object[[@"foobar" substringToIndex:3]] = @"bar";
+        checkResult(@"substring used as subscript", [[object[@"foo"] toString] isEqual:@"bar"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TextXYZ *testXYZ = [[TextXYZ alloc] init];
+        context[@"testXYZ"] = testXYZ;
+        testXYZ.x = 3;
+        testXYZ.y = 4;
+        testXYZ.z = 5;
+        [context evaluateScript:@"testXYZ.x = 13; testXYZ.y = 14;"];
+        [context evaluateScript:@"testXYZ.test('test')"];
+        checkResult(@"TextXYZ - testXYZTested", testXYZTested);
+        JSValue *result = [context evaluateScript:@"testXYZ.x + ',' + testXYZ.y + ',' + testXYZ.z"];
+        checkResult(@"TextXYZ - result", [result isEqualToObject:@"13,4,undefined"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        [context[@"Object"][@"prototype"] defineProperty:@"getterProperty" descriptor:@{
+            JSPropertyDescriptorGetKey:^{
+                return [JSContext currentThis][@"x"];
+            }
+        }];
+        JSValue *object = [JSValue valueWithObject:@{ @"x":@101 } inContext:context];
+        int result = [object [@"getterProperty"] toInt32];
+        checkResult(@"getterProperty", result == 101);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"concatenate"] = ^{
+            NSArray *arguments = [JSContext currentArguments];
+            if (![arguments count])
+                return @"";
+            NSString *message = [arguments[0] description];
+            for (NSUInteger index = 1; index < [arguments count]; ++index)
+                message = [NSString stringWithFormat:@"%@ %@", message, arguments[index]];
+            return message;
+        };
+        JSValue *result = [context evaluateScript:@"concatenate('Hello,', 'World!')"];
+        checkResult(@"concatenate", [result isEqualToObject:@"Hello, World!"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"foo"] = @YES;
+        checkResult(@"@YES is boolean", [context[@"foo"] isBoolean]);
+        JSValue *result = [context evaluateScript:@"typeof foo"];
+        checkResult(@"@YES is boolean", [result isEqualToObject:@"boolean"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *result = [context evaluateScript:@"String(console)"];
+        checkResult(@"String(console)", [result isEqualToObject:@"[object Console]"]);
+        result = [context evaluateScript:@"typeof console.log"];
+        checkResult(@"typeof console.log", [result isEqualToObject:@"function"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"String(testObject)"];
+        checkResult(@"String(testObject)", [result isEqualToObject:@"[object TestObject]"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"String(testObject.__proto__)"];
+        checkResult(@"String(testObject.__proto__)", [result isEqualToObject:@"[object TestObjectPrototype]"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"TestObject"] = [TestObject class];
+        JSValue *result = [context evaluateScript:@"String(TestObject)"];
+        checkResult(@"String(TestObject)", [result isEqualToObject:@"function TestObject() {\n    [native code]\n}"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue* value = [JSValue valueWithObject:[TestObject class] inContext:context];
+        checkResult(@"[value toObject] == [TestObject class]", [value toObject] == [TestObject class]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"TestObject"] = [TestObject class];
+        JSValue *result = [context evaluateScript:@"TestObject.parentTest()"];
+        checkResult(@"TestObject.parentTest()", [result isEqualToObject:@"TestObject"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObjectA"] = testObject;
+        context[@"testObjectB"] = testObject;
+        JSValue *result = [context evaluateScript:@"testObjectA == testObjectB"];
+        checkResult(@"testObjectA == testObjectB", result.isBoolean && [result toBool]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        testObject.point = (CGPoint){3,4};
+        JSValue *result = [context evaluateScript:@"var result = JSON.stringify(testObject.point); testObject.point = {x:12,y:14}; result"];
+        checkResult(@"testObject.point - result", [result isEqualToObject:@"{\"x\":3,\"y\":4}"]);
+        checkResult(@"testObject.point - {x:12,y:14}", testObject.point.x == 12 && testObject.point.y == 14);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        testObject.six = 6;
+        context[@"testObject"] = testObject;
+        context[@"mul"] = ^(int x, int y){ return x * y; };
+        JSValue *result = [context evaluateScript:@"mul(testObject.six, 7)"];
+        checkResult(@"mul(testObject.six, 7)", result.isNumber && [result toInt32] == 42);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        context[@"testObject"][@"variable"] = @4;
+        [context evaluateScript:@"++testObject.variable"];
+        checkResult(@"++testObject.variable", testObject.variable == 5);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"point"] = @{ @"x":@6, @"y":@7 };
+        JSValue *result = [context evaluateScript:@"point.x + ',' + point.y"];
+        checkResult(@"point.x + ',' + point.y", [result isEqualToObject:@"6,7"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"point"] = @{ @"x":@6, @"y":@7 };
+        JSValue *result = [context evaluateScript:@"point.x + ',' + point.y"];
+        checkResult(@"point.x + ',' + point.y", [result isEqualToObject:@"6,7"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"testObject.getString()"];
+        checkResult(@"testObject.getString()", result.isString && [result toInt32] == 42);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"testObject.testArgumentTypes(101,0.5,true,'foo',666,[false,'bar',false],{x:'baz'})"];
+        checkResult(@"testObject.testArgumentTypes", [result isEqualToObject:@"101,0.5,1,foo,666,bar,baz"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"testObject.getString.call(testObject)"];
+        checkResult(@"testObject.getString.call(testObject)", result.isString && [result toInt32] == 42);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        checkResult(@"testObject.getString.call({}) pre", !context.exception);
+        [context evaluateScript:@"testObject.getString.call({})"];
+        checkResult(@"testObject.getString.call({}) post", context.exception);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject* testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"var result = 0; testObject.callback(function(x){ result = x; }); result"];
+        checkResult(@"testObject.callback", result.isNumber && [result toInt32] == 42);
+        result = [context evaluateScript:@"testObject.bogusCallback"];
+        checkResult(@"testObject.bogusCallback == undefined", result.isUndefined);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject *testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSValue *result = [context evaluateScript:@"Function.prototype.toString.call(testObject.callback)"];
+        checkResult(@"Function.prototype.toString", !context.exception && !result.isUndefined);
+    }
+
+    @autoreleasepool {
+        JSContext *context1 = [[JSContext alloc] init];
+        JSContext *context2 = [[JSContext alloc] initWithVirtualMachine:context1.virtualMachine];
+        JSValue *value = [JSValue valueWithDouble:42 inContext:context2];
+        context1[@"passValueBetweenContexts"] = value;
+        JSValue *result = [context1 evaluateScript:@"passValueBetweenContexts"];
+        checkResult(@"[value isEqualToObject:result]", [value isEqualToObject:result]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"handleTheDictionary"] = ^(NSDictionary *dict) {
+            NSDictionary *expectedDict = @{
+                @"foo" : [NSNumber numberWithInt:1],
+                @"bar" : @{
+                    @"baz": [NSNumber numberWithInt:2]
+                }
+            };
+            checkResult(@"recursively convert nested dictionaries", [dict isEqualToDictionary:expectedDict]);
+        };
+        [context evaluateScript:@"var myDict = { \
+            'foo': 1, \
+            'bar': {'baz': 2} \
+        }; \
+        handleTheDictionary(myDict);"];
+
+        context[@"handleTheArray"] = ^(NSArray *array) {
+            NSArray *expectedArray = @[@"foo", @"bar", @[@"baz"]];
+            checkResult(@"recursively convert nested arrays", [array isEqualToArray:expectedArray]);
+        };
+        [context evaluateScript:@"var myArray = ['foo', 'bar', ['baz']]; handleTheArray(myArray);"];
+    }
+
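+    // Clearing the exported constructor's prototype, dropping the JS reference, and forcing a GC
+    // should still allow the same Obj-C object to be re-exported to the context without crashing.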
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject *testObject = [TestObject testObject];
+        @autoreleasepool {
+            context[@"testObject"] = testObject;
+            [context evaluateScript:@"var constructor = Object.getPrototypeOf(testObject).constructor; constructor.prototype = undefined;"];
+            [context evaluateScript:@"testObject = undefined"];
+        }
+        
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        @autoreleasepool {
+            context[@"testObject"] = testObject;
+        }
+    }
+
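+    // The strong onclick handler should survive garbage collection while the weak weakOnclick handler
+    // should not; once the JS reference to testXYZ is cleared, the click handler should no longer fire.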
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TextXYZ *testXYZ = [[TextXYZ alloc] init];
+
+        @autoreleasepool {
+            context[@"testXYZ"] = testXYZ;
+
+            [context evaluateScript:@" \
+                didClick = false; \
+                testXYZ.onclick = function() { \
+                    didClick = true; \
+                }; \
+                 \
+                testXYZ.weakOnclick = function() { \
+                    return 'foo'; \
+                }; \
+            "];
+        }
+
+        @autoreleasepool {
+            [testXYZ click];
+            JSValue *result = [context evaluateScript:@"didClick"];
+            checkResult(@"Event handler onclick", [result toBool]);
+        }
+
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        @autoreleasepool {
+            JSValue *result = [context evaluateScript:@"testXYZ.onclick"];
+            checkResult(@"onclick still around after GC", !(result.isNull || result.isUndefined));
+        }
+
+
+        @autoreleasepool {
+            JSValue *result = [context evaluateScript:@"testXYZ.weakOnclick"];
+            checkResult(@"weakOnclick not around after GC", result.isNull || result.isUndefined);
+        }
+
+        @autoreleasepool {
+            [context evaluateScript:@" \
+                didClick = false; \
+                testXYZ = null; \
+            "];
+        }
+
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        @autoreleasepool {
+            [testXYZ click];
+            JSValue *result = [context evaluateScript:@"didClick"];
+            checkResult(@"Event handler onclick doesn't fire", ![result toBool]);
+        }
+    }
+
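+    // A custom JS property set on the last node of a TinyDOMNode chain, reachable only through the
+    // Obj-C object graph, should survive a full garbage collection.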
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TinyDOMNode *root = [[TinyDOMNode alloc] initWithVirtualMachine:context.virtualMachine];
+        TinyDOMNode *lastNode = root;
+        for (NSUInteger i = 0; i < 3; i++) {
+            TinyDOMNode *newNode = [[TinyDOMNode alloc] initWithVirtualMachine:context.virtualMachine];
+            [lastNode appendChild:newNode];
+            lastNode = newNode;
+        }
+
+        @autoreleasepool {
+            context[@"root"] = root;
+            context[@"getLastNodeInChain"] = ^(TinyDOMNode *head){
+                TinyDOMNode *lastNode = nil;
+                while (head) {
+                    lastNode = head;
+                    head = [lastNode childAtIndex:0];
+                }
+                return lastNode;
+            };
+            [context evaluateScript:@"getLastNodeInChain(root).myCustomProperty = 42;"];
+        }
+
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        JSValue *myCustomProperty = [context evaluateScript:@"getLastNodeInChain(root).myCustomProperty"];
+        checkResult(@"My custom property == 42", myCustomProperty.isNumber && [myCustomProperty toInt32] == 42);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TinyDOMNode *root = [[TinyDOMNode alloc] initWithVirtualMachine:context.virtualMachine];
+        TinyDOMNode *lastNode = root;
+        for (NSUInteger i = 0; i < 3; i++) {
+            TinyDOMNode *newNode = [[TinyDOMNode alloc] initWithVirtualMachine:context.virtualMachine];
+            [lastNode appendChild:newNode];
+            lastNode = newNode;
+        }
+
+        @autoreleasepool {
+            context[@"root"] = root;
+            context[@"getLastNodeInChain"] = ^(TinyDOMNode *head){
+                TinyDOMNode *lastNode = nil;
+                while (head) {
+                    lastNode = head;
+                    head = [lastNode childAtIndex:0];
+                }
+                return lastNode;
+            };
+            [context evaluateScript:@"getLastNodeInChain(root).myCustomProperty = 42;"];
+
+            [root appendChild:[root childAtIndex:0]];
+            [root removeChildAtIndex:0];
+        }
+
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        JSValue *myCustomProperty = [context evaluateScript:@"getLastNodeInChain(root).myCustomProperty"];
+        checkResult(@"duplicate calls to addManagedReference don't cause things to die", myCustomProperty.isNumber && [myCustomProperty toInt32] == 42);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        JSValue *o = [JSValue valueWithNewObjectInContext:context];
+        o[@"foo"] = @"foo";
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        checkResult(@"JSValue correctly protected its internal value", [[o[@"foo"] toString] isEqualToString:@"foo"]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject *testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        [context evaluateScript:@"testObject.__lookupGetter__('variable').call({})"];
+        checkResult(@"Make sure we throw an exception when calling getter on incorrect |this|", context.exception);
+    }
+
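+    // Registering managed references after their underlying values have already been collected should be harmless.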
+    @autoreleasepool {
+        static const unsigned count = 100;
+        NSMutableArray *array = [NSMutableArray arrayWithCapacity:count];
+        JSContext *context = [[JSContext alloc] init];
+        @autoreleasepool {
+            for (unsigned i = 0; i < count; ++i) {
+                JSValue *object = [JSValue valueWithNewObjectInContext:context];
+                JSManagedValue *managedObject = [JSManagedValue managedValueWithValue:object];
+                [array addObject:managedObject];
+            }
+        }
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+        for (unsigned i = 0; i < count; ++i)
+            [context.virtualMachine addManagedReference:array[i] withOwner:array];
+    }
+
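+    // A JSManagedValue registered with a virtual machine that is subsequently deallocated should not crash.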
+    @autoreleasepool {
+        TestObject *testObject = [TestObject testObject];
+        JSManagedValue *managedTestObject;
+        @autoreleasepool {
+            JSContext *context = [[JSContext alloc] init];
+            context[@"testObject"] = testObject;
+            managedTestObject = [JSManagedValue managedValueWithValue:context[@"testObject"]];
+            [context.virtualMachine addManagedReference:managedTestObject withOwner:testObject];
+        }
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        TestObject *testObject = [TestObject testObject];
+        context[@"testObject"] = testObject;
+        JSManagedValue *managedValue = nil;
+        @autoreleasepool {
+            JSValue *object = [JSValue valueWithNewObjectInContext:context];
+            managedValue = [JSManagedValue managedValueWithValue:object andOwner:testObject];
+            [context.virtualMachine addManagedReference:managedValue withOwner:testObject];
+        }
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+    }
+
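+    // Blocks assigned to the context can act as constructors: each builds its own |this| and wires up the
+    // matching prototype, so instanceof, prototype.constructor, and Function checks behave as expected.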
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"MyClass"] = ^{
+            JSValue *newThis = [JSValue valueWithNewObjectInContext:[JSContext currentContext]];
+            JSGlobalContextRef contextRef = [[JSContext currentContext] JSGlobalContextRef];
+            JSObjectRef newThisRef = JSValueToObject(contextRef, [newThis JSValueRef], NULL);
+            JSObjectSetPrototype(contextRef, newThisRef, [[JSContext currentContext][@"MyClass"][@"prototype"] JSValueRef]);
+            return newThis;
+        };
+
+        context[@"MyOtherClass"] = ^{
+            JSValue *newThis = [JSValue valueWithNewObjectInContext:[JSContext currentContext]];
+            JSGlobalContextRef contextRef = [[JSContext currentContext] JSGlobalContextRef];
+            JSObjectRef newThisRef = JSValueToObject(contextRef, [newThis JSValueRef], NULL);
+            JSObjectSetPrototype(contextRef, newThisRef, [[JSContext currentContext][@"MyOtherClass"][@"prototype"] JSValueRef]);
+            return newThis;
+        };
+
+        context.exceptionHandler = ^(JSContext *context, JSValue *exception) {
+            NSLog(@"EXCEPTION: %@", [exception toString]);
+            context.exception = nil;
+        };
+
+        JSValue *constructor1 = context[@"MyClass"];
+        JSValue *constructor2 = context[@"MyOtherClass"];
+
+        JSValue *value1 = [context evaluateScript:@"new MyClass()"];
+        checkResult(@"value1 instanceof MyClass", [value1 isInstanceOf:constructor1]);
+        checkResult(@"!(value1 instanceof MyOtherClass)", ![value1 isInstanceOf:constructor2]);
+        checkResult(@"MyClass.prototype.constructor === MyClass", [[context evaluateScript:@"MyClass.prototype.constructor === MyClass"] toBool]);
+        checkResult(@"MyClass instanceof Function", [[context evaluateScript:@"MyClass instanceof Function"] toBool]);
+
+        JSValue *value2 = [context evaluateScript:@"new MyOtherClass()"];
+        checkResult(@"value2 instanceof MyOtherClass", [value2 isInstanceOf:constructor2]);
+        checkResult(@"!(value2 instanceof MyClass)", ![value2 isInstanceOf:constructor1]);
+        checkResult(@"MyOtherClass.prototype.constructor === MyOtherClass", [[context evaluateScript:@"MyOtherClass.prototype.constructor === MyOtherClass"] toBool]);
+        checkResult(@"MyOtherClass instanceof Function", [[context evaluateScript:@"MyOtherClass instanceof Function"] toBool]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"MyClass"] = ^{
+            NSLog(@"I'm intentionally not returning anything.");
+        };
+        JSValue *result = [context evaluateScript:@"new MyClass()"];
+        checkResult(@"result === undefined", result.isUndefined);
+        checkResult(@"exception.message is correct'", context.exception 
+            && [@"Objective-C blocks called as constructors must return an object." isEqualToString:[context.exception[@"message"] toString]]);
+    }
+
+    @autoreleasepool {
+        checkResult(@"[JSContext currentThis] == nil outside of callback", ![JSContext currentThis]);
+        checkResult(@"[JSContext currentArguments] == nil outside of callback", ![JSContext currentArguments]);
+        if ([JSContext currentCallee])
+            checkResult(@"[JSContext currentCallee] == nil outside of callback", ![JSContext currentCallee]);
+    }
+
+    if ([JSContext currentCallee]) {
+        @autoreleasepool {
+            JSContext *context = [[JSContext alloc] init];
+            context[@"testFunction"] = ^{
+                checkResult(@"testFunction.foo === 42", [[JSContext currentCallee][@"foo"] toInt32] == 42);
+            };
+            context[@"testFunction"][@"foo"] = @42;
+            [context[@"testFunction"] callWithArguments:nil];
+
+            context[@"TestConstructor"] = ^{
+                JSValue *newThis = [JSValue valueWithNewObjectInContext:[JSContext currentContext]];
+                JSGlobalContextRef contextRef = [[JSContext currentContext] JSGlobalContextRef];
+                JSObjectRef newThisRef = JSValueToObject(contextRef, [newThis JSValueRef], NULL);
+                JSObjectSetPrototype(contextRef, newThisRef, [[JSContext currentCallee][@"prototype"] JSValueRef]);
+                return newThis;
+            };
+            checkResult(@"(new TestConstructor) instanceof TestConstructor", [context evaluateScript:@"(new TestConstructor) instanceof TestConstructor"]);
+        }
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"TestObject"] = [TestObject class];
+        JSValue *testObject = [context evaluateScript:@"(new TestObject())"];
+        checkResult(@"testObject instanceof TestObject", [testObject isInstanceOf:context[@"TestObject"]]);
+
+        context[@"TextXYZ"] = [TextXYZ class];
+        JSValue *textObject = [context evaluateScript:@"(new TextXYZ(\"Called TextXYZ constructor!\"))"];
+        checkResult(@"textObject instanceof TextXYZ", [textObject isInstanceOf:context[@"TextXYZ"]]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"ClassA"] = [ClassA class];
+        context[@"ClassB"] = [ClassB class];
+        context[@"ClassC"] = [ClassC class]; // Should print error message about too many inits found.
+        context[@"ClassCPrime"] = [ClassCPrime class]; // Ditto.
+
+        JSValue *a = [context evaluateScript:@"(new ClassA(42))"];
+        checkResult(@"a instanceof ClassA", [a isInstanceOf:context[@"ClassA"]]);
+        checkResult(@"a.initialize() is callable", [[a invokeMethod:@"initialize" withArguments:@[]] toInt32] == 42);
+
+        JSValue *b = [context evaluateScript:@"(new ClassB(42, 53))"];
+        checkResult(@"b instanceof ClassB", [b isInstanceOf:context[@"ClassB"]]);
+
+        JSValue *canConstructClassC = [context evaluateScript:@"(function() { \
+            try { \
+                (new ClassC(1, 2)); \
+                return true; \
+            } catch(e) { \
+                return false; \
+            } \
+        })()"];
+        checkResult(@"shouldn't be able to construct ClassC", ![canConstructClassC toBool]);
+
+        JSValue *canConstructClassCPrime = [context evaluateScript:@"(function() { \
+            try { \
+                (new ClassCPrime(1)); \
+                return true; \
+            } catch(e) { \
+                return false; \
+            } \
+        })()"];
+        checkResult(@"shouldn't be able to construct ClassCPrime", ![canConstructClassCPrime toBool]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"ClassA"] = [ClassA class];
+        context.exceptionHandler = ^(JSContext *context, JSValue *exception) {
+            NSLog(@"%@", [exception toString]);
+            context.exception = exception;
+        };
+
+        checkResult(@"ObjC Constructor without 'new' pre", !context.exception);
+        [context evaluateScript:@"ClassA(42)"];
+        checkResult(@"ObjC Constructor without 'new' post", context.exception);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"ClassD"] = [ClassD class];
+        context[@"ClassE"] = [ClassE class];
+       
+        JSValue *d = [context evaluateScript:@"(new ClassD())"];
+        checkResult(@"Returning instance of ClassE from ClassD's init has correct class", [d isInstanceOf:context[@"ClassE"]]);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        while (!evilAllocationObjectWasDealloced) {
+            @autoreleasepool {
+                EvilAllocationObject *evilObject = [[EvilAllocationObject alloc] initWithContext:context];
+                context[@"evilObject"] = evilObject;
+                context[@"evilObject"] = nil;
+            }
+        }
+        checkResult(@"EvilAllocationObject was successfully dealloced without crashing", evilAllocationObjectWasDealloced);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        checkResult(@"default context.name is nil", context.name == nil);
+        NSString *name1 = @"Name1";
+        NSString *name2 = @"Name2";
+        context.name = name1;
+        NSString *fetchedName1 = context.name;
+        context.name = name2;
+        NSString *fetchedName2 = context.name;
+        context.name = nil;
+        NSString *fetchedName3 = context.name;
+        checkResult(@"fetched context.name was expected", [fetchedName1 isEqualToString:name1]);
+        checkResult(@"fetched context.name was expected", [fetchedName2 isEqualToString:name2]);
+        checkResult(@"fetched context.name was expected", ![fetchedName1 isEqualToString:fetchedName2]);
+        checkResult(@"fetched context.name was expected", fetchedName3 == nil);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        context[@"UnexportedObject"] = [UnexportedObject class];
+        context[@"makeObject"] = ^{
+            return [[UnexportedObject alloc] init];
+        };
+        JSValue *result = [context evaluateScript:@"(makeObject() instanceof UnexportedObject)"];
+        checkResult(@"makeObject() instanceof UnexportedObject", result.isBoolean && [result toBool]);
+    }
+
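+    // Converting a non-object value with toDictionary/toArray should not crash (both return nil for non-objects).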
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        [[JSValue valueWithInt32:42 inContext:context] toDictionary];
+        [[JSValue valueWithInt32:42 inContext:context] toArray];
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+
+        // Create the root, make it reachable from JS, and force an EdenCollection
+        // so that we scan the external object graph.
+        TestObject *root = [TestObject testObject];
+        @autoreleasepool {
+            context[@"root"] = root;
+        }
+        JSSynchronousEdenCollectForDebugging([context JSGlobalContextRef]);
+
+        // Create a new Obj-C object only reachable via the external object graph
+        // through the object we already scanned during the EdenCollection.
+        TestObject *child = [TestObject testObject];
+        [context.virtualMachine addManagedReference:child withOwner:root];
+
+        // Create a new managed JSValue that will only be kept alive if we properly rescan
+        // the external object graph.
+        JSManagedValue *managedJSObject = nil;
+        @autoreleasepool {
+            JSValue *jsObject = [JSValue valueWithObject:@"hello" inContext:context];
+            managedJSObject = [JSManagedValue managedValueWithValue:jsObject];
+            [context.virtualMachine addManagedReference:managedJSObject withOwner:child];
+        }
+
+        // Force another EdenCollection. It should rescan the new part of the external object graph.
+        JSSynchronousEdenCollectForDebugging([context JSGlobalContextRef]);
+        
+        // Check that the managed JSValue is still alive.
+        checkResult(@"EdenCollection doesn't reclaim new managed values", [managedJSObject value] != nil);
+    }
+
+    @autoreleasepool {
+        JSContext *context = [[JSContext alloc] init];
+        
+        pthread_t threadID;
+        pthread_create(&threadID, NULL, &threadMain, (__bridge void*)context);
+        pthread_join(threadID, nullptr);
+        JSSynchronousGarbageCollectForDebugging([context JSGlobalContextRef]);
+
+        checkResult(@"Did not crash after entering the VM from another thread", true);
+    }
+    
+    @autoreleasepool {
+        std::vector<pthread_t> threads;
+        bool ok = true;
+        for (unsigned i = 0; i < 5; ++i) {
+            pthread_t threadID;
+            pthread_create(&threadID, nullptr, multiVMThreadMain, &ok);
+            threads.push_back(threadID);
+        }
+
+        for (pthread_t thread : threads)
+            pthread_join(thread, nullptr);
+
+        checkResult(@"Ran code in five concurrent VMs that GC'd", ok);
+    }
+    
+    currentThisInsideBlockGetterTest();
+    runDateTests();
+    runJSExportTests();
+    runRegress141275();
+    runRegress141809();
+}
+
+@protocol NumberProtocol <JSExport>
+
+@property (nonatomic) NSInteger number;
+
+@end
+
+@interface NumberObject : NSObject <NumberProtocol>
+
+@property (nonatomic) NSInteger number;
+
+@end
+
+@implementation NumberObject
+
+@end
+
+// Check that negative NSIntegers retain the correct value when passed into JS code.
+static void checkNegativeNSIntegers()
+{
+    NumberObject *container = [[NumberObject alloc] init];
+    container.number = -1;
+    JSContext *context = [[JSContext alloc] init];
+    context[@"container"] = container;
+    NSString *jsID = @"var getContainerNumber = function() { return container.number }";
+    [context evaluateScript:jsID];
+    JSValue *jsFunction = context[@"getContainerNumber"];
+    JSValue *result = [jsFunction callWithArguments:@[]];
+    
+    checkResult(@"Negative number maintained its original value", [[result toString] isEqualToString:@"-1"]);
+}
+
+void testObjectiveCAPI()
+{
+    NSLog(@"Testing Objective-C API");
+    checkNegativeNSIntegers();
+    testObjectiveCAPIMain();
+}
+
+#else
+
+void testObjectiveCAPI()
+{
+}
+
+#endif // JSC_OBJC_API_ENABLED
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..e50da8c
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,2 @@
+Harri Porten (porten@kde.org)
+Peter Kelly (pmk@post.com)
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..968545c
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,1524 @@
+cmake_minimum_required(VERSION 2.8.12)
+include(WebKitCommon)
+set_property(DIRECTORY . PROPERTY FOLDER "JavaScriptCore")
+
+set(JavaScriptCore_INCLUDE_DIRECTORIES
+    "${CMAKE_BINARY_DIR}"
+    "${JAVASCRIPTCORE_DIR}"
+    "${JAVASCRIPTCORE_DIR}/.."
+    "${JAVASCRIPTCORE_DIR}/API"
+    "${JAVASCRIPTCORE_DIR}/ForwardingHeaders"
+    "${JAVASCRIPTCORE_DIR}/assembler"
+    "${JAVASCRIPTCORE_DIR}/b3"
+    "${JAVASCRIPTCORE_DIR}/b3/air"
+    "${JAVASCRIPTCORE_DIR}/bindings"
+    "${JAVASCRIPTCORE_DIR}/builtins"
+    "${JAVASCRIPTCORE_DIR}/bytecode"
+    "${JAVASCRIPTCORE_DIR}/bytecompiler"
+    "${JAVASCRIPTCORE_DIR}/dfg"
+    "${JAVASCRIPTCORE_DIR}/disassembler"
+    "${JAVASCRIPTCORE_DIR}/disassembler/udis86"
+    "${JAVASCRIPTCORE_DIR}/disassembler/arm64"
+    "${JAVASCRIPTCORE_DIR}/domjit"
+    "${JAVASCRIPTCORE_DIR}/ftl"
+    "${JAVASCRIPTCORE_DIR}/heap"
+    "${JAVASCRIPTCORE_DIR}/debugger"
+    "${JAVASCRIPTCORE_DIR}/inspector"
+    "${JAVASCRIPTCORE_DIR}/inspector/agents"
+    "${JAVASCRIPTCORE_DIR}/inspector/augmentable"
+    "${JAVASCRIPTCORE_DIR}/inspector/remote"
+    "${JAVASCRIPTCORE_DIR}/interpreter"
+    "${JAVASCRIPTCORE_DIR}/jit"
+    "${JAVASCRIPTCORE_DIR}/llint"
+    "${JAVASCRIPTCORE_DIR}/parser"
+    "${JAVASCRIPTCORE_DIR}/profiler"
+    "${JAVASCRIPTCORE_DIR}/replay"
+    "${JAVASCRIPTCORE_DIR}/runtime"
+    "${JAVASCRIPTCORE_DIR}/tools"
+    "${JAVASCRIPTCORE_DIR}/wasm"
+    "${JAVASCRIPTCORE_DIR}/wasm/js"
+    "${JAVASCRIPTCORE_DIR}/yarr"
+    "${DERIVED_SOURCES_DIR}/ForwardingHeaders"
+    "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}"
+    "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector"
+)
+
+set(JavaScriptCore_SYSTEM_INCLUDE_DIRECTORIES
+    "${ICU_INCLUDE_DIRS}"
+)
+
+set(JavaScriptCore_SOURCES
+    API/JSBase.cpp
+    API/JSCTestRunnerUtils.cpp
+    API/JSCallbackConstructor.cpp
+    API/JSCallbackFunction.cpp
+    API/JSCallbackObject.cpp
+    API/JSClassRef.cpp
+    API/JSContextRef.cpp
+    API/JSObjectRef.cpp
+    API/JSTypedArray.cpp
+    API/JSScriptRef.cpp
+    API/JSStringRef.cpp
+    API/JSValueRef.cpp
+    API/JSWeakObjectMapRefPrivate.cpp
+    API/OpaqueJSString.cpp
+
+    assembler/ARMAssembler.cpp
+    assembler/LinkBuffer.cpp
+    assembler/MacroAssembler.cpp
+    assembler/MacroAssemblerARM.cpp
+    assembler/MacroAssemblerARMv7.cpp
+    assembler/MacroAssemblerCodeRef.cpp
+    assembler/MacroAssemblerPrinter.cpp
+    assembler/MacroAssemblerX86Common.cpp
+
+    b3/air/AirAllocateStack.cpp
+    b3/air/AirArg.cpp
+    b3/air/AirBasicBlock.cpp
+    b3/air/AirCCallSpecial.cpp
+    b3/air/AirCCallingConvention.cpp
+    b3/air/AirCode.cpp
+    b3/air/AirCustom.cpp
+    b3/air/AirDumpAsJS.cpp
+    b3/air/AirEliminateDeadCode.cpp
+    b3/air/AirEmitShuffle.cpp
+    b3/air/AirFixObviousSpills.cpp
+    b3/air/AirFixPartialRegisterStalls.cpp
+    b3/air/AirGenerate.cpp
+    b3/air/AirGenerated.cpp
+    b3/air/AirHandleCalleeSaves.cpp
+    b3/air/AirInsertionSet.cpp
+    b3/air/AirInst.cpp
+    b3/air/AirIteratedRegisterCoalescing.cpp
+    b3/air/AirKind.cpp
+    b3/air/AirLogRegisterPressure.cpp
+    b3/air/AirLowerAfterRegAlloc.cpp
+    b3/air/AirLowerEntrySwitch.cpp
+    b3/air/AirLowerMacros.cpp
+    b3/air/AirOptimizeBlockOrder.cpp
+    b3/air/AirPadInterference.cpp
+    b3/air/AirPhaseScope.cpp
+    b3/air/AirReportUsedRegisters.cpp
+    b3/air/AirSimplifyCFG.cpp
+    b3/air/AirSpecial.cpp
+    b3/air/AirSpillEverything.cpp
+    b3/air/AirStackSlot.cpp
+    b3/air/AirStackSlotKind.cpp
+    b3/air/AirTmp.cpp
+    b3/air/AirTmpWidth.cpp
+    b3/air/AirValidate.cpp
+
+    b3/B3ArgumentRegValue.cpp
+    b3/B3BasicBlock.cpp
+    b3/B3BlockInsertionSet.cpp
+    b3/B3BreakCriticalEdges.cpp
+    b3/B3CCallValue.cpp
+    b3/B3CaseCollection.cpp
+    b3/B3CheckSpecial.cpp
+    b3/B3CheckValue.cpp
+    b3/B3Common.cpp
+    b3/B3Commutativity.cpp
+    b3/B3Compile.cpp
+    b3/B3Compilation.cpp
+    b3/B3Const32Value.cpp
+    b3/B3Const64Value.cpp
+    b3/B3ConstDoubleValue.cpp
+    b3/B3ConstFloatValue.cpp
+    b3/B3ConstrainedValue.cpp
+    b3/B3DataSection.cpp
+    b3/B3DuplicateTails.cpp
+    b3/B3Effects.cpp
+    b3/B3EliminateCommonSubexpressions.cpp
+    b3/B3FenceValue.cpp
+    b3/B3FixSSA.cpp
+    b3/B3FoldPathConstants.cpp
+    b3/B3FrequencyClass.cpp
+    b3/B3Generate.cpp
+    b3/B3HeapRange.cpp
+    b3/B3InferSwitches.cpp
+    b3/B3InsertionSet.cpp
+    b3/B3Kind.cpp
+    b3/B3LegalizeMemoryOffsets.cpp
+    b3/B3LowerMacros.cpp
+    b3/B3LowerMacrosAfterOptimizations.cpp
+    b3/B3LowerToAir.cpp
+    b3/B3MathExtras.cpp
+    b3/B3MemoryValue.cpp
+    b3/B3MoveConstants.cpp
+    b3/B3OpaqueByproducts.cpp
+    b3/B3Opcode.cpp
+    b3/B3Origin.cpp
+    b3/B3OriginDump.cpp
+    b3/B3PatchpointSpecial.cpp
+    b3/B3PatchpointValue.cpp
+    b3/B3PhaseScope.cpp
+    b3/B3PhiChildren.cpp
+    b3/B3Procedure.cpp
+    b3/B3PureCSE.cpp
+    b3/B3ReduceDoubleToFloat.cpp
+    b3/B3ReduceStrength.cpp
+    b3/B3SSACalculator.cpp
+    b3/B3SlotBaseValue.cpp
+    b3/B3StackmapGenerationParams.cpp
+    b3/B3StackmapSpecial.cpp
+    b3/B3StackmapValue.cpp
+    b3/B3StackSlot.cpp
+    b3/B3SwitchCase.cpp
+    b3/B3SwitchValue.cpp
+    b3/B3TimingScope.cpp
+    b3/B3Type.cpp
+    b3/B3UpsilonValue.cpp
+    b3/B3UseCounts.cpp
+    b3/B3Validate.cpp
+    b3/B3Value.cpp
+    b3/B3ValueKey.cpp
+    b3/B3ValueRep.cpp
+    b3/B3Variable.cpp
+    b3/B3VariableValue.cpp
+    b3/B3WasmAddressValue.cpp
+    b3/B3WasmBoundsCheckValue.cpp
+
+    bindings/ScriptFunctionCall.cpp
+    bindings/ScriptObject.cpp
+    bindings/ScriptValue.cpp
+
+    builtins/BuiltinExecutables.cpp
+    builtins/BuiltinExecutableCreator.cpp
+
+    bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
+    bytecode/ArithProfile.cpp
+    bytecode/ArrayAllocationProfile.cpp
+    bytecode/ArrayProfile.cpp
+    bytecode/BytecodeBasicBlock.cpp
+    bytecode/BytecodeGeneratorification.cpp
+    bytecode/BytecodeIntrinsicRegistry.cpp
+    bytecode/BytecodeLivenessAnalysis.cpp
+    bytecode/BytecodeRewriter.cpp
+    bytecode/CallEdge.cpp
+    bytecode/CallLinkInfo.cpp
+    bytecode/CallLinkStatus.cpp
+    bytecode/CallMode.cpp
+    bytecode/CallVariant.cpp
+    bytecode/CodeBlock.cpp
+    bytecode/CodeBlockHash.cpp
+    bytecode/CodeBlockJettisoningWatchpoint.cpp
+    bytecode/CodeOrigin.cpp
+    bytecode/CodeType.cpp
+    bytecode/ComplexGetStatus.cpp
+    bytecode/DFGExitProfile.cpp
+    bytecode/DOMJITAccessCasePatchpointParams.cpp
+    bytecode/DataFormat.cpp
+    bytecode/DeferredCompilationCallback.cpp
+    bytecode/DeferredSourceDump.cpp
+    bytecode/DirectEvalCodeCache.cpp
+    bytecode/EvalCodeBlock.cpp
+    bytecode/ExecutionCounter.cpp
+    bytecode/ExitKind.cpp
+    bytecode/ExitingJITType.cpp
+    bytecode/FunctionCodeBlock.cpp
+    bytecode/GetByIdStatus.cpp
+    bytecode/GetByIdVariant.cpp
+    bytecode/InlineAccess.cpp
+    bytecode/InlineCallFrame.cpp
+    bytecode/InlineCallFrameSet.cpp
+    bytecode/JumpTable.cpp
+    bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
+    bytecode/LazyOperandValueProfile.cpp
+    bytecode/MethodOfGettingAValueProfile.cpp
+    bytecode/ModuleProgramCodeBlock.cpp
+    bytecode/ObjectPropertyCondition.cpp
+    bytecode/ObjectPropertyConditionSet.cpp
+    bytecode/Opcode.cpp
+    bytecode/PolymorphicAccess.cpp
+    bytecode/PreciseJumpTargets.cpp
+    bytecode/ProgramCodeBlock.cpp
+    bytecode/PropertyCondition.cpp
+    bytecode/PutByIdFlags.cpp
+    bytecode/PutByIdStatus.cpp
+    bytecode/PutByIdVariant.cpp
+    bytecode/ReduceWhitespace.cpp
+    bytecode/SpecialPointer.cpp
+    bytecode/SpeculatedType.cpp
+    bytecode/StructureSet.cpp
+    bytecode/StructureStubClearingWatchpoint.cpp
+    bytecode/StructureStubInfo.cpp
+    bytecode/SuperSampler.cpp
+    bytecode/ToThisStatus.cpp
+    bytecode/TrackedReferences.cpp
+    bytecode/UnlinkedCodeBlock.cpp
+    bytecode/UnlinkedEvalCodeBlock.cpp
+    bytecode/UnlinkedFunctionCodeBlock.cpp
+    bytecode/UnlinkedFunctionExecutable.cpp
+    bytecode/UnlinkedInstructionStream.cpp
+    bytecode/UnlinkedModuleProgramCodeBlock.cpp
+    bytecode/UnlinkedProgramCodeBlock.cpp
+    bytecode/ValueRecovery.cpp
+    bytecode/VariableWriteFireDetail.cpp
+    bytecode/VirtualRegister.cpp
+    bytecode/Watchpoint.cpp
+
+    bytecompiler/BytecodeGenerator.cpp
+    bytecompiler/NodesCodegen.cpp
+
+    debugger/Debugger.cpp
+    debugger/DebuggerCallFrame.cpp
+    debugger/DebuggerLocation.cpp
+    debugger/DebuggerParseData.cpp
+    debugger/DebuggerScope.cpp
+
+    dfg/DFGAbstractHeap.cpp
+    dfg/DFGAbstractValue.cpp
+    dfg/DFGAdaptiveInferredPropertyValueWatchpoint.cpp
+    dfg/DFGAdaptiveStructureWatchpoint.cpp
+    dfg/DFGArgumentsEliminationPhase.cpp
+    dfg/DFGArgumentsUtilities.cpp
+    dfg/DFGArithMode.cpp
+    dfg/DFGArrayMode.cpp
+    dfg/DFGAtTailAbstractState.cpp
+    dfg/DFGAvailability.cpp
+    dfg/DFGAvailabilityMap.cpp
+    dfg/DFGBackwardsPropagationPhase.cpp
+    dfg/DFGBasicBlock.cpp
+    dfg/DFGBlockInsertionSet.cpp
+    dfg/DFGBlockSet.cpp
+    dfg/DFGByteCodeParser.cpp
+    dfg/DFGCFAPhase.cpp
+    dfg/DFGCFGSimplificationPhase.cpp
+    dfg/DFGCPSRethreadingPhase.cpp
+    dfg/DFGCSEPhase.cpp
+    dfg/DFGCapabilities.cpp
+    dfg/DFGCleanUpPhase.cpp
+    dfg/DFGClobberSet.cpp
+    dfg/DFGClobberize.cpp
+    dfg/DFGClobbersExitState.cpp
+    dfg/DFGCombinedLiveness.cpp
+    dfg/DFGCommon.cpp
+    dfg/DFGCommonData.cpp
+    dfg/DFGCompilationKey.cpp
+    dfg/DFGCompilationMode.cpp
+    dfg/DFGConstantFoldingPhase.cpp
+    dfg/DFGConstantHoistingPhase.cpp
+    dfg/DFGCriticalEdgeBreakingPhase.cpp
+    dfg/DFGDCEPhase.cpp
+    dfg/DFGDOMJITPatchpointParams.cpp
+    dfg/DFGDesiredIdentifiers.cpp
+    dfg/DFGDesiredTransitions.cpp
+    dfg/DFGDesiredWatchpoints.cpp
+    dfg/DFGDesiredWeakReferences.cpp
+    dfg/DFGDisassembler.cpp
+    dfg/DFGDoesGC.cpp
+    dfg/DFGDriver.cpp
+    dfg/DFGEdge.cpp
+    dfg/DFGEpoch.cpp
+    dfg/DFGFailedFinalizer.cpp
+    dfg/DFGFinalizer.cpp
+    dfg/DFGFixupPhase.cpp
+    dfg/DFGFlowIndexing.cpp
+    dfg/DFGFlushFormat.cpp
+    dfg/DFGFlushedAt.cpp
+    dfg/DFGLiveCatchVariablePreservationPhase.cpp
+    dfg/DFGFrozenValue.cpp
+    dfg/DFGGraph.cpp
+    dfg/DFGGraphSafepoint.cpp
+    dfg/DFGHeapLocation.cpp
+    dfg/DFGInPlaceAbstractState.cpp
+    dfg/DFGInferredTypeCheck.cpp
+    dfg/DFGInsertionSet.cpp
+    dfg/DFGIntegerCheckCombiningPhase.cpp
+    dfg/DFGIntegerRangeOptimizationPhase.cpp
+    dfg/DFGInvalidationPointInjectionPhase.cpp
+    dfg/DFGJITCode.cpp
+    dfg/DFGJITCompiler.cpp
+    dfg/DFGJITFinalizer.cpp
+    dfg/DFGJumpReplacement.cpp
+    dfg/DFGLICMPhase.cpp
+    dfg/DFGLazyJSValue.cpp
+    dfg/DFGLazyNode.cpp
+    dfg/DFGLivenessAnalysisPhase.cpp
+    dfg/DFGLongLivedState.cpp
+    dfg/DFGLoopPreHeaderCreationPhase.cpp
+    dfg/DFGMaximalFlushInsertionPhase.cpp
+    dfg/DFGMayExit.cpp
+    dfg/DFGMinifiedGraph.cpp
+    dfg/DFGMinifiedNode.cpp
+    dfg/DFGMovHintRemovalPhase.cpp
+    dfg/DFGMultiGetByOffsetData.cpp
+    dfg/DFGNaturalLoops.cpp
+    dfg/DFGNode.cpp
+    dfg/DFGNodeAbstractValuePair.cpp
+    dfg/DFGNodeFlags.cpp
+    dfg/DFGNodeFlowProjection.cpp
+    dfg/DFGNodeOrigin.cpp
+    dfg/DFGOSRAvailabilityAnalysisPhase.cpp
+    dfg/DFGOSREntry.cpp
+    dfg/DFGOSREntrypointCreationPhase.cpp
+    dfg/DFGOSRExit.cpp
+    dfg/DFGOSRExitBase.cpp
+    dfg/DFGOSRExitCompiler.cpp
+    dfg/DFGOSRExitCompiler32_64.cpp
+    dfg/DFGOSRExitCompiler64.cpp
+    dfg/DFGOSRExitCompilerCommon.cpp
+    dfg/DFGOSRExitFuzz.cpp
+    dfg/DFGOSRExitJumpPlaceholder.cpp
+    dfg/DFGOSRExitPreparation.cpp
+    dfg/DFGObjectAllocationSinkingPhase.cpp
+    dfg/DFGObjectMaterializationData.cpp
+    dfg/DFGOperations.cpp
+    dfg/DFGPhantomInsertionPhase.cpp
+    dfg/DFGPhase.cpp
+    dfg/DFGPhiChildren.cpp
+    dfg/DFGPlan.cpp
+    dfg/DFGPrePostNumbering.cpp
+    dfg/DFGPredictionInjectionPhase.cpp
+    dfg/DFGPredictionPropagationPhase.cpp
+    dfg/DFGPromotedHeapLocation.cpp
+    dfg/DFGPureValue.cpp
+    dfg/DFGPutStackSinkingPhase.cpp
+    dfg/DFGRegisteredStructureSet.cpp
+    dfg/DFGSSACalculator.cpp
+    dfg/DFGSSAConversionPhase.cpp
+    dfg/DFGSSALoweringPhase.cpp
+    dfg/DFGSafepoint.cpp
+    dfg/DFGSpeculativeJIT.cpp
+    dfg/DFGSpeculativeJIT32_64.cpp
+    dfg/DFGSpeculativeJIT64.cpp
+    dfg/DFGStackLayoutPhase.cpp
+    dfg/DFGStaticExecutionCountEstimationPhase.cpp
+    dfg/DFGStoreBarrierClusteringPhase.cpp
+    dfg/DFGStoreBarrierInsertionPhase.cpp
+    dfg/DFGStrengthReductionPhase.cpp
+    dfg/DFGStructureAbstractValue.cpp
+    dfg/DFGThreadData.cpp
+    dfg/DFGThunks.cpp
+    dfg/DFGTierUpCheckInjectionPhase.cpp
+    dfg/DFGToFTLDeferredCompilationCallback.cpp
+    dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp
+    dfg/DFGTransition.cpp
+    dfg/DFGTypeCheckHoistingPhase.cpp
+    dfg/DFGUnificationPhase.cpp
+    dfg/DFGUseKind.cpp
+    dfg/DFGValidate.cpp
+    dfg/DFGValueSource.cpp
+    dfg/DFGValueStrength.cpp
+    dfg/DFGVarargsForwardingPhase.cpp
+    dfg/DFGVariableAccessData.cpp
+    dfg/DFGVariableAccessDataDump.cpp
+    dfg/DFGVariableEvent.cpp
+    dfg/DFGVariableEventStream.cpp
+    dfg/DFGVirtualRegisterAllocationPhase.cpp
+    dfg/DFGWatchpointCollectionPhase.cpp
+    dfg/DFGWorklist.cpp
+
+    disassembler/ARM64Disassembler.cpp
+    disassembler/ARMLLVMDisassembler.cpp
+    disassembler/ARMv7Disassembler.cpp
+    disassembler/Disassembler.cpp
+    disassembler/UDis86Disassembler.cpp
+    disassembler/X86Disassembler.cpp
+
+    disassembler/ARM64/A64DOpcode.cpp
+
+    disassembler/ARMv7/ARMv7DOpcode.cpp
+
+    disassembler/udis86/udis86.c
+    disassembler/udis86/udis86_decode.c
+    disassembler/udis86/udis86_itab_holder.c
+    disassembler/udis86/udis86_syn-att.c
+    disassembler/udis86/udis86_syn-intel.c
+    disassembler/udis86/udis86_syn.c
+
+    domjit/DOMJITAbstractHeap.cpp
+    domjit/DOMJITHeapRange.cpp
+
+    ftl/FTLAbstractHeap.cpp
+    ftl/FTLAbstractHeapRepository.cpp
+    ftl/FTLAvailableRecovery.cpp
+    ftl/FTLCapabilities.cpp
+    ftl/FTLCommonValues.cpp
+    ftl/FTLCompile.cpp
+    ftl/FTLDOMJITPatchpointParams.cpp
+    ftl/FTLExceptionTarget.cpp
+    ftl/FTLExitArgument.cpp
+    ftl/FTLExitArgumentForOperand.cpp
+    ftl/FTLExitPropertyValue.cpp
+    ftl/FTLExitTimeObjectMaterialization.cpp
+    ftl/FTLExitValue.cpp
+    ftl/FTLFail.cpp
+    ftl/FTLForOSREntryJITCode.cpp
+    ftl/FTLJITCode.cpp
+    ftl/FTLJITFinalizer.cpp
+    ftl/FTLLazySlowPath.cpp
+    ftl/FTLLink.cpp
+    ftl/FTLLocation.cpp
+    ftl/FTLLowerDFGToB3.cpp
+    ftl/FTLOSREntry.cpp
+    ftl/FTLOSRExit.cpp
+    ftl/FTLOSRExitCompiler.cpp
+    ftl/FTLOSRExitHandle.cpp
+    ftl/FTLOperations.cpp
+    ftl/FTLOutput.cpp
+    ftl/FTLPatchpointExceptionHandle.cpp
+    ftl/FTLRecoveryOpcode.cpp
+    ftl/FTLSaveRestore.cpp
+    ftl/FTLSlowPathCall.cpp
+    ftl/FTLSlowPathCallKey.cpp
+    ftl/FTLState.cpp
+    ftl/FTLThunks.cpp
+    ftl/FTLValueRange.cpp
+
+    heap/AllocatorAttributes.cpp
+    heap/CellContainer.cpp
+    heap/CodeBlockSet.cpp
+    heap/CollectionScope.cpp
+    heap/ConservativeRoots.cpp
+    heap/DeferGC.cpp
+    heap/DestructionMode.cpp
+    heap/EdenGCActivityCallback.cpp
+    heap/FullGCActivityCallback.cpp
+    heap/FreeList.cpp
+    heap/GCActivityCallback.cpp
+    heap/GCLogging.cpp
+    heap/HandleSet.cpp
+    heap/HandleStack.cpp
+    heap/Heap.cpp
+    heap/HeapCell.cpp
+    heap/HeapHelperPool.cpp
+    heap/HeapProfiler.cpp
+    heap/HeapSnapshot.cpp
+    heap/HeapSnapshotBuilder.cpp
+    heap/HeapStatistics.cpp
+    heap/HeapTimer.cpp
+    heap/HeapVerifier.cpp
+    heap/IncrementalSweeper.cpp
+    heap/JITStubRoutineSet.cpp
+    heap/LargeAllocation.cpp
+    heap/LiveObjectList.cpp
+    heap/MachineStackMarker.cpp
+    heap/MarkStack.cpp
+    heap/MarkedAllocator.cpp
+    heap/MarkedBlock.cpp
+    heap/MarkedSpace.cpp
+    heap/MarkingConstraint.cpp
+    heap/MarkingConstraintSet.cpp
+    heap/MutatorScheduler.cpp
+    heap/MutatorState.cpp
+    heap/SlotVisitor.cpp
+    heap/SpaceTimeMutatorScheduler.cpp
+    heap/StochasticSpaceTimeMutatorScheduler.cpp
+    heap/StopIfNecessaryTimer.cpp
+    heap/Subspace.cpp
+    heap/SynchronousStopTheWorldMutatorScheduler.cpp
+    heap/VisitRaceKey.cpp
+    heap/Weak.cpp
+    heap/WeakBlock.cpp
+    heap/WeakHandleOwner.cpp
+    heap/WeakSet.cpp
+    heap/WriteBarrierSupport.cpp
+
+    inspector/AsyncStackTrace.cpp
+    inspector/ConsoleMessage.cpp
+    inspector/ContentSearchUtilities.cpp
+    inspector/EventLoop.cpp
+    inspector/IdentifiersFactory.cpp
+    inspector/InjectedScript.cpp
+    inspector/InjectedScriptBase.cpp
+    inspector/InjectedScriptHost.cpp
+    inspector/InjectedScriptManager.cpp
+    inspector/InjectedScriptModule.cpp
+    inspector/InspectorAgentRegistry.cpp
+    inspector/InspectorFrontendRouter.cpp
+    inspector/InspectorBackendDispatcher.cpp
+    inspector/InspectorValues.cpp
+    inspector/JSGlobalObjectConsoleClient.cpp
+    inspector/JSGlobalObjectInspectorController.cpp
+    inspector/JSGlobalObjectScriptDebugServer.cpp
+    inspector/JSInjectedScriptHost.cpp
+    inspector/JSInjectedScriptHostPrototype.cpp
+    inspector/JSJavaScriptCallFrame.cpp
+    inspector/JSJavaScriptCallFramePrototype.cpp
+    inspector/JavaScriptCallFrame.cpp
+    inspector/PerGlobalObjectWrapperWorld.cpp
+    inspector/ScriptArguments.cpp
+    inspector/ScriptCallFrame.cpp
+    inspector/ScriptCallStack.cpp
+    inspector/ScriptCallStackFactory.cpp
+    inspector/ScriptDebugServer.cpp
+
+    inspector/agents/InspectorAgent.cpp
+    inspector/agents/InspectorConsoleAgent.cpp
+    inspector/agents/InspectorDebuggerAgent.cpp
+    inspector/agents/InspectorHeapAgent.cpp
+    inspector/agents/InspectorRuntimeAgent.cpp
+    inspector/agents/InspectorScriptProfilerAgent.cpp
+    inspector/agents/JSGlobalObjectConsoleAgent.cpp
+    inspector/agents/JSGlobalObjectDebuggerAgent.cpp
+    inspector/agents/JSGlobalObjectRuntimeAgent.cpp
+
+    interpreter/AbstractPC.cpp
+    interpreter/CLoopStack.cpp
+    interpreter/CallFrame.cpp
+    interpreter/Interpreter.cpp
+    interpreter/ProtoCallFrame.cpp
+    interpreter/ShadowChicken.cpp
+    interpreter/StackVisitor.cpp
+
+    jit/AssemblyHelpers.cpp
+    jit/BinarySwitch.cpp
+    jit/CCallHelpers.cpp
+    jit/CachedRecovery.cpp
+    jit/CallFrameShuffleData.cpp
+    jit/CallFrameShuffler.cpp
+    jit/CallFrameShuffler32_64.cpp
+    jit/CallFrameShuffler64.cpp
+    jit/ExecutableAllocationFuzz.cpp
+    jit/ExecutableAllocator.cpp
+    jit/ExecutableAllocatorFixedVMPool.cpp
+    jit/GCAwareJITStubRoutine.cpp
+    jit/GPRInfo.cpp
+    jit/HostCallReturnValue.cpp
+    jit/ICStats.cpp
+    jit/IntrinsicEmitter.cpp
+    jit/JIT.cpp
+    jit/JITAddGenerator.cpp
+    jit/JITArithmetic.cpp
+    jit/JITArithmetic32_64.cpp
+    jit/JITBitAndGenerator.cpp
+    jit/JITBitOrGenerator.cpp
+    jit/JITBitXorGenerator.cpp
+    jit/JITCall.cpp
+    jit/JITCall32_64.cpp
+    jit/JITCode.cpp
+    jit/JITDisassembler.cpp
+    jit/JITDivGenerator.cpp
+    jit/JITExceptions.cpp
+    jit/JITInlineCacheGenerator.cpp
+    jit/JITLeftShiftGenerator.cpp
+    jit/JITMulGenerator.cpp
+    jit/JITNegGenerator.cpp
+    jit/JITOpcodes.cpp
+    jit/JITOpcodes32_64.cpp
+    jit/JITOperations.cpp
+    jit/JITPropertyAccess.cpp
+    jit/JITPropertyAccess32_64.cpp
+    jit/JITRightShiftGenerator.cpp
+    jit/JITStubRoutine.cpp
+    jit/JITSubGenerator.cpp
+    jit/JITThunks.cpp
+    jit/JITToDFGDeferredCompilationCallback.cpp
+    jit/JITWorklist.cpp
+    jit/PCToCodeOriginMap.cpp
+    jit/PolymorphicCallStubRoutine.cpp
+    jit/Reg.cpp
+    jit/RegisterAtOffset.cpp
+    jit/RegisterAtOffsetList.cpp
+    jit/RegisterSet.cpp
+    jit/Repatch.cpp
+    jit/ScratchRegisterAllocator.cpp
+    jit/SetupVarargsFrame.cpp
+    jit/TagRegistersMode.cpp
+    jit/TempRegisterSet.cpp
+    jit/ThunkGenerators.cpp
+
+    llint/LLIntCLoop.cpp
+    llint/LLIntData.cpp
+    llint/LLIntEntrypoint.cpp
+    llint/LLIntExceptions.cpp
+    llint/LLIntSlowPaths.cpp
+    llint/LLIntThunks.cpp
+    llint/LowLevelInterpreter.cpp
+
+    parser/Lexer.cpp
+    parser/ModuleAnalyzer.cpp
+    parser/Nodes.cpp
+    parser/NodesAnalyzeModule.cpp
+    parser/Parser.cpp
+    parser/ParserArena.cpp
+    parser/SourceProvider.cpp
+    parser/SourceProviderCache.cpp
+    parser/UnlinkedSourceCode.cpp
+    parser/VariableEnvironment.cpp
+
+    profiler/ProfilerBytecode.cpp
+    profiler/ProfilerBytecodeSequence.cpp
+    profiler/ProfilerBytecodes.cpp
+    profiler/ProfilerCompilation.cpp
+    profiler/ProfilerCompilationKind.cpp
+    profiler/ProfilerCompiledBytecode.cpp
+    profiler/ProfilerDatabase.cpp
+    profiler/ProfilerEvent.cpp
+    profiler/ProfilerJettisonReason.cpp
+    profiler/ProfilerOSRExit.cpp
+    profiler/ProfilerOSRExitSite.cpp
+    profiler/ProfilerOrigin.cpp
+    profiler/ProfilerOriginStack.cpp
+    profiler/ProfilerProfiledBytecodes.cpp
+    profiler/ProfilerUID.cpp
+
+    runtime/AbstractModuleRecord.cpp
+    runtime/ArgList.cpp
+    runtime/ArrayBuffer.cpp
+    runtime/ArrayBufferNeuteringWatchpoint.cpp
+    runtime/ArrayBufferView.cpp
+    runtime/ArrayConstructor.cpp
+    runtime/ArrayConventions.cpp
+    runtime/ArrayIteratorAdaptiveWatchpoint.cpp
+    runtime/ArrayIteratorPrototype.cpp
+    runtime/ArrayPrototype.cpp
+    runtime/AtomicsObject.cpp
+    runtime/AsyncFunctionConstructor.cpp
+    runtime/AsyncFunctionPrototype.cpp
+    runtime/BasicBlockLocation.cpp
+    runtime/BooleanConstructor.cpp
+    runtime/BooleanObject.cpp
+    runtime/BooleanPrototype.cpp
+    runtime/CallData.cpp
+    runtime/CatchScope.cpp
+    runtime/ClonedArguments.cpp
+    runtime/CodeCache.cpp
+    runtime/CodeSpecializationKind.cpp
+    runtime/CommonIdentifiers.cpp
+    runtime/CommonSlowPaths.cpp
+    runtime/CommonSlowPathsExceptions.cpp
+    runtime/CompilationResult.cpp
+    runtime/Completion.cpp
+    runtime/ConsoleClient.cpp
+    runtime/ConsoleObject.cpp
+    runtime/ConstantMode.cpp
+    runtime/ConstructData.cpp
+    runtime/ControlFlowProfiler.cpp
+    runtime/CustomGetterSetter.cpp
+    runtime/DataView.cpp
+    runtime/DateConstructor.cpp
+    runtime/DateConversion.cpp
+    runtime/DateInstance.cpp
+    runtime/DatePrototype.cpp
+    runtime/DirectArguments.cpp
+    runtime/DirectArgumentsOffset.cpp
+    runtime/DirectEvalExecutable.cpp
+    runtime/DumpContext.cpp
+    runtime/ECMAScriptSpecInternalFunctions.cpp
+    runtime/Error.cpp
+    runtime/ErrorConstructor.cpp
+    runtime/ErrorHandlingScope.cpp
+    runtime/ErrorInstance.cpp
+    runtime/ErrorPrototype.cpp
+    runtime/EvalExecutable.cpp
+    runtime/Exception.cpp
+    runtime/ExceptionEventLocation.cpp
+    runtime/ExceptionFuzz.cpp
+    runtime/ExceptionHelpers.cpp
+    runtime/ExceptionScope.cpp
+    runtime/ExecutableBase.cpp
+    runtime/FunctionConstructor.cpp
+    runtime/FunctionExecutable.cpp
+    runtime/FunctionExecutableDump.cpp
+    runtime/FunctionHasExecutedCache.cpp
+    runtime/FunctionPrototype.cpp
+    runtime/FunctionRareData.cpp
+    runtime/GeneratorFunctionConstructor.cpp
+    runtime/GeneratorFunctionPrototype.cpp
+    runtime/GeneratorPrototype.cpp
+    runtime/GetterSetter.cpp
+    runtime/HashMapImpl.cpp
+    runtime/Identifier.cpp
+    runtime/IndexingType.cpp
+    runtime/IndirectEvalExecutable.cpp
+    runtime/InferredType.cpp
+    runtime/InferredTypeTable.cpp
+    runtime/InferredValue.cpp
+    runtime/InitializeThreading.cpp
+    runtime/InspectorInstrumentationObject.cpp
+    runtime/InternalFunction.cpp
+    runtime/IntlCollator.cpp
+    runtime/IntlCollatorConstructor.cpp
+    runtime/IntlCollatorPrototype.cpp
+    runtime/IntlDateTimeFormat.cpp
+    runtime/IntlDateTimeFormatConstructor.cpp
+    runtime/IntlDateTimeFormatPrototype.cpp
+    runtime/IntlNumberFormat.cpp
+    runtime/IntlNumberFormatConstructor.cpp
+    runtime/IntlNumberFormatPrototype.cpp
+    runtime/IntlObject.cpp
+    runtime/IteratorOperations.cpp
+    runtime/IteratorPrototype.cpp
+    runtime/JSAPIValueWrapper.cpp
+    runtime/JSArray.cpp
+    runtime/JSArrayBuffer.cpp
+    runtime/JSArrayBufferConstructor.cpp
+    runtime/JSArrayBufferPrototype.cpp
+    runtime/JSArrayBufferView.cpp
+    runtime/JSAsyncFunction.cpp
+    runtime/JSBoundFunction.cpp
+    runtime/JSCJSValue.cpp
+    runtime/JSCallee.cpp
+    runtime/JSCell.cpp
+    runtime/JSCustomGetterSetterFunction.cpp
+    runtime/JSDataView.cpp
+    runtime/JSDataViewPrototype.cpp
+    runtime/JSDateMath.cpp
+    runtime/JSDestructibleObjectSubspace.cpp
+    runtime/JSEnvironmentRecord.cpp
+    runtime/JSFixedArray.cpp
+    runtime/JSFunction.cpp
+    runtime/JSGeneratorFunction.cpp
+    runtime/JSGlobalLexicalEnvironment.cpp
+    runtime/JSGlobalObject.cpp
+    runtime/JSGlobalObjectDebuggable.cpp
+    runtime/JSGlobalObjectFunctions.cpp
+    runtime/JSInternalPromise.cpp
+    runtime/JSInternalPromiseConstructor.cpp
+    runtime/JSInternalPromiseDeferred.cpp
+    runtime/JSInternalPromisePrototype.cpp
+    runtime/JSJob.cpp
+    runtime/JSLexicalEnvironment.cpp
+    runtime/JSLock.cpp
+    runtime/JSMap.cpp
+    runtime/JSMapIterator.cpp
+    runtime/JSModuleEnvironment.cpp
+    runtime/JSModuleLoader.cpp
+    runtime/JSModuleNamespaceObject.cpp
+    runtime/JSModuleRecord.cpp
+    runtime/JSNativeStdFunction.cpp
+    runtime/JSONObject.cpp
+    runtime/JSObject.cpp
+    runtime/JSPromise.cpp
+    runtime/JSPromiseConstructor.cpp
+    runtime/JSPromiseDeferred.cpp
+    runtime/JSPromisePrototype.cpp
+    runtime/JSPropertyNameEnumerator.cpp
+    runtime/JSPropertyNameIterator.cpp
+    runtime/JSProxy.cpp
+    runtime/JSScope.cpp
+    runtime/JSSegmentedVariableObject.cpp
+    runtime/JSSet.cpp
+    runtime/JSSetIterator.cpp
+    runtime/JSString.cpp
+    runtime/JSStringIterator.cpp
+    runtime/JSStringJoiner.cpp
+    runtime/JSStringSubspace.cpp
+    runtime/JSSymbolTableObject.cpp
+    runtime/JSTemplateRegistryKey.cpp
+    runtime/JSTypedArrayConstructors.cpp
+    runtime/JSTypedArrayPrototypes.cpp
+    runtime/JSTypedArrayViewConstructor.cpp
+    runtime/JSTypedArrayViewPrototype.cpp
+    runtime/JSTypedArrays.cpp
+    runtime/JSWeakMap.cpp
+    runtime/JSWeakSet.cpp
+    runtime/JSWithScope.cpp
+    runtime/JSWrapperObject.cpp
+    runtime/LazyClassStructure.cpp
+    runtime/LiteralParser.cpp
+    runtime/Lookup.cpp
+    runtime/MapBase.cpp
+    runtime/MapConstructor.cpp
+    runtime/MapIteratorPrototype.cpp
+    runtime/MapPrototype.cpp
+    runtime/MatchResult.cpp
+    runtime/MathCommon.cpp
+    runtime/MathObject.cpp
+    runtime/MemoryStatistics.cpp
+    runtime/ModuleLoaderPrototype.cpp
+    runtime/ModuleProgramExecutable.cpp
+    runtime/NativeErrorConstructor.cpp
+    runtime/NativeErrorPrototype.cpp
+    runtime/NativeExecutable.cpp
+    runtime/NativeStdFunctionCell.cpp
+    runtime/NullGetterFunction.cpp
+    runtime/NullSetterFunction.cpp
+    runtime/NumberConstructor.cpp
+    runtime/NumberObject.cpp
+    runtime/NumberPrototype.cpp
+    runtime/ObjectConstructor.cpp
+    runtime/ObjectPrototype.cpp
+    runtime/Operations.cpp
+    runtime/Options.cpp
+    runtime/ProgramExecutable.cpp
+    runtime/PropertyDescriptor.cpp
+    runtime/PropertySlot.cpp
+    runtime/PropertyTable.cpp
+    runtime/PrototypeMap.cpp
+    runtime/ProxyConstructor.cpp
+    runtime/ProxyObject.cpp
+    runtime/ProxyRevoke.cpp
+    runtime/ReflectObject.cpp
+    runtime/RegExp.cpp
+    runtime/RegExpCache.cpp
+    runtime/RegExpCachedResult.cpp
+    runtime/RegExpConstructor.cpp
+    runtime/RegExpMatchesArray.cpp
+    runtime/RegExpObject.cpp
+    runtime/RegExpPrototype.cpp
+    runtime/RuntimeType.cpp
+    runtime/SamplingCounter.cpp
+    runtime/SamplingProfiler.cpp
+    runtime/ScopeOffset.cpp
+    runtime/ScopedArguments.cpp
+    runtime/ScopedArgumentsTable.cpp
+    runtime/ScriptExecutable.cpp
+    runtime/SetConstructor.cpp
+    runtime/SetIteratorPrototype.cpp
+    runtime/SetPrototype.cpp
+    runtime/SimpleTypedArrayController.cpp
+    runtime/SmallStrings.cpp
+    runtime/SparseArrayValueMap.cpp
+    runtime/StackFrame.cpp
+    runtime/StrictEvalActivation.cpp
+    runtime/StringConstructor.cpp
+    runtime/StringIteratorPrototype.cpp
+    runtime/StringObject.cpp
+    runtime/StringPrototype.cpp
+    runtime/StringRecursionChecker.cpp
+    runtime/Structure.cpp
+    runtime/StructureChain.cpp
+    runtime/StructureIDTable.cpp
+    runtime/StructureRareData.cpp
+    runtime/Symbol.cpp
+    runtime/SymbolConstructor.cpp
+    runtime/SymbolObject.cpp
+    runtime/SymbolPrototype.cpp
+    runtime/SymbolTable.cpp
+    runtime/TemplateRegistry.cpp
+    runtime/TemplateRegistryKey.cpp
+    runtime/TemplateRegistryKeyTable.cpp
+    runtime/TestRunnerUtils.cpp
+    runtime/ThrowScope.cpp
+    runtime/TypeLocationCache.cpp
+    runtime/TypeProfiler.cpp
+    runtime/TypeProfilerLog.cpp
+    runtime/TypeSet.cpp
+    runtime/TypedArrayController.cpp
+    runtime/TypedArrayType.cpp
+    runtime/TypeofType.cpp
+    runtime/VM.cpp
+    runtime/VMEntryScope.cpp
+    runtime/VarOffset.cpp
+    runtime/Watchdog.cpp
+    runtime/WeakMapConstructor.cpp
+    runtime/WeakMapData.cpp
+    runtime/WeakMapPrototype.cpp
+    runtime/WeakSetConstructor.cpp
+    runtime/WeakSetPrototype.cpp
+
+    tools/CodeProfile.cpp
+    tools/CodeProfiling.cpp
+    tools/FunctionOverrides.cpp
+    tools/FunctionWhitelist.cpp
+    tools/JSDollarVM.cpp
+    tools/JSDollarVMPrototype.cpp
+    tools/SigillCrashAnalyzer.cpp
+    tools/VMInspector.cpp
+
+    wasm/JSWebAssembly.cpp
+    wasm/WasmB3IRGenerator.cpp
+    wasm/WasmBinding.cpp
+    wasm/WasmCallingConvention.cpp
+    wasm/WasmFormat.cpp
+    wasm/WasmMemory.cpp
+    wasm/WasmMemoryInformation.cpp
+    wasm/WasmModuleParser.cpp
+    wasm/WasmPlan.cpp
+    wasm/WasmValidate.cpp
+
+    wasm/js/JSWebAssemblyCallee.cpp
+    wasm/js/JSWebAssemblyCompileError.cpp
+    wasm/js/JSWebAssemblyInstance.cpp
+    wasm/js/JSWebAssemblyMemory.cpp
+    wasm/js/JSWebAssemblyModule.cpp
+    wasm/js/JSWebAssemblyRuntimeError.cpp
+    wasm/js/JSWebAssemblyTable.cpp
+    wasm/js/WebAssemblyCompileErrorConstructor.cpp
+    wasm/js/WebAssemblyCompileErrorPrototype.cpp
+    wasm/js/WebAssemblyFunction.cpp
+    wasm/js/WebAssemblyInstanceConstructor.cpp
+    wasm/js/WebAssemblyInstancePrototype.cpp
+    wasm/js/WebAssemblyMemoryConstructor.cpp
+    wasm/js/WebAssemblyMemoryPrototype.cpp
+    wasm/js/WebAssemblyModuleConstructor.cpp
+    wasm/js/WebAssemblyModulePrototype.cpp
+    wasm/js/WebAssemblyModuleRecord.cpp
+    wasm/js/WebAssemblyPrototype.cpp
+    wasm/js/WebAssemblyRuntimeErrorConstructor.cpp
+    wasm/js/WebAssemblyRuntimeErrorPrototype.cpp
+    wasm/js/WebAssemblyTableConstructor.cpp
+    wasm/js/WebAssemblyTablePrototype.cpp
+    wasm/js/WebAssemblyToJSCallee.cpp
+
+    yarr/RegularExpression.cpp
+    yarr/YarrCanonicalizeUCS2.cpp
+    yarr/YarrInterpreter.cpp
+    yarr/YarrJIT.cpp
+    yarr/YarrPattern.cpp
+    yarr/YarrSyntaxChecker.cpp
+)
+
+# Extra flags for compile sources can go here.
+if (NOT MSVC)
+    set_source_files_properties(runtime/ProxyObject.cpp PROPERTIES COMPILE_FLAGS -fno-optimize-sibling-calls)
+else ()
+    # FIXME: Investigate if we need to set a similar flag on Windows.
+endif ()
+
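+# Sources containing @begin/@end property lookup tables; the build generates a .lut.h header for each of these.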
+set(JavaScriptCore_OBJECT_LUT_SOURCES
+    runtime/ArrayConstructor.cpp
+    runtime/ArrayIteratorPrototype.cpp
+    runtime/BooleanPrototype.cpp
+    runtime/DateConstructor.cpp
+    runtime/DatePrototype.cpp
+    runtime/ErrorPrototype.cpp
+    runtime/GeneratorPrototype.cpp
+    runtime/InspectorInstrumentationObject.cpp
+    runtime/IntlCollatorConstructor.cpp
+    runtime/IntlCollatorPrototype.cpp
+    runtime/IntlDateTimeFormatConstructor.cpp
+    runtime/IntlDateTimeFormatPrototype.cpp
+    runtime/IntlNumberFormatConstructor.cpp
+    runtime/IntlNumberFormatPrototype.cpp
+    runtime/JSDataViewPrototype.cpp
+    runtime/JSGlobalObject.cpp
+    runtime/JSInternalPromiseConstructor.cpp
+    runtime/JSONObject.cpp
+    runtime/JSPromiseConstructor.cpp
+    runtime/JSPromisePrototype.cpp
+    runtime/MapPrototype.cpp
+    runtime/ModuleLoaderPrototype.cpp
+    runtime/NumberConstructor.cpp
+    runtime/NumberPrototype.cpp
+    runtime/ObjectConstructor.cpp
+    runtime/ReflectObject.cpp
+    runtime/RegExpConstructor.cpp
+    runtime/RegExpPrototype.cpp
+    runtime/SetPrototype.cpp
+    runtime/StringConstructor.cpp
+    runtime/StringIteratorPrototype.cpp
+    runtime/StringPrototype.cpp
+    runtime/SymbolConstructor.cpp
+    runtime/SymbolPrototype.cpp
+
+    wasm/js/WebAssemblyCompileErrorConstructor.cpp
+    wasm/js/WebAssemblyCompileErrorPrototype.cpp
+    wasm/js/WebAssemblyInstanceConstructor.cpp
+    wasm/js/WebAssemblyInstancePrototype.cpp
+    wasm/js/WebAssemblyMemoryConstructor.cpp
+    wasm/js/WebAssemblyMemoryPrototype.cpp
+    wasm/js/WebAssemblyModuleConstructor.cpp
+    wasm/js/WebAssemblyModulePrototype.cpp
+    wasm/js/WebAssemblyPrototype.cpp
+    wasm/js/WebAssemblyRuntimeErrorConstructor.cpp
+    wasm/js/WebAssemblyRuntimeErrorPrototype.cpp
+    wasm/js/WebAssemblyTableConstructor.cpp
+    wasm/js/WebAssemblyTablePrototype.cpp
+)
+
+set(JavaScriptCore_LIBRARIES
+    WTF${DEBUG_SUFFIX}
+    ${ICU_I18N_LIBRARIES}
+    ${LLVM_LIBRARIES}
+)
+
+set(JavaScriptCore_SCRIPTS_SOURCES_DIR "${JAVASCRIPTCORE_DIR}/Scripts")
+
+# Globbing relies on the fact that generator-specific file names are prefixed with their directory.
+# Top-level scripts should have a file extension, since they are invoked during the build.
+
+set(JavaScriptCore_SCRIPTS_SOURCES_PATHS
+    ${JavaScriptCore_SCRIPTS_SOURCES_DIR}/*.pl
+    ${JavaScriptCore_SCRIPTS_SOURCES_DIR}/*.py
+    ${JavaScriptCore_SCRIPTS_SOURCES_DIR}/builtins/builtins*.py
+)
+
+# Force JavaScriptCore to run scripts from the same staging path as WebCore.
+set(JavaScriptCore_SCRIPTS_DIR "${DERIVED_SOURCES_DIR}/ForwardingHeaders/JavaScriptCore/Scripts")
+
+file(MAKE_DIRECTORY ${JavaScriptCore_SCRIPTS_DIR})
+
+# The directory flattening performed below mirrors what the Mac port does with private headers.
+
+file(GLOB JavaScriptCore_SCRIPTS_SOURCES ${JavaScriptCore_SCRIPTS_SOURCES_PATHS})
+
+foreach (_file ${JavaScriptCore_SCRIPTS_SOURCES})
+    get_filename_component(_script "${_file}" NAME)
+    add_custom_command(
+        OUTPUT ${JavaScriptCore_SCRIPTS_DIR}/${_script}
+        MAIN_DEPENDENCY ${_file}
+        WORKING_DIRECTORY ${DERIVED_SOURCES_DIR}
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${_file} ${JavaScriptCore_SCRIPTS_DIR}/${_script}
+        VERBATIM)
+    list(APPEND JavaScriptCore_SCRIPTS ${JavaScriptCore_SCRIPTS_DIR}/${_script})
+endforeach ()
+
+set(UDIS_GEN_DEP
+    disassembler/udis86/ud_opcode.py
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/udis86_itab.c ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/udis86_itab.h
+    DEPENDS ${UDIS_GEN_DEP}
+    WORKING_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/disassembler/udis86/ud_itab.py ${JAVASCRIPTCORE_DIR}/disassembler/udis86/optable.xml ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    VERBATIM)
+
+list(APPEND JavaScriptCore_HEADERS
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/udis86_itab.h
+)
+
+set(LLINT_ASM
+    llint/LowLevelInterpreter.asm
+    llint/LowLevelInterpreter32_64.asm
+    llint/LowLevelInterpreter64.asm
+)
+
+set(OFFLINE_ASM
+    offlineasm/arm.rb
+    offlineasm/arm64.rb
+    offlineasm/ast.rb
+    offlineasm/backends.rb
+    offlineasm/cloop.rb
+    offlineasm/config.rb
+    offlineasm/instructions.rb
+    offlineasm/mips.rb
+    offlineasm/offsets.rb
+    offlineasm/opt.rb
+    offlineasm/parser.rb
+    offlineasm/registers.rb
+    offlineasm/risc.rb
+    offlineasm/self_hash.rb
+    offlineasm/settings.rb
+    offlineasm/sh4.rb
+    offlineasm/transform.rb
+    offlineasm/x86.rb
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generate-bytecode-files
+    DEPENDS ${JAVASCRIPTCORE_DIR}/generate-bytecode-files bytecode/BytecodeList.json
+    COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generate-bytecode-files --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm ${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.json
+    VERBATIM)
+
+list(APPEND JavaScriptCore_HEADERS
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/offlineasm/generate_offset_extractor.rb
+    DEPENDS ${LLINT_ASM} ${OFFLINE_ASM} ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm
+    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/offlineasm/generate_offset_extractor.rb -I${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/ ${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h
+    VERBATIM)
+
+# We add the header files directly to the ADD_EXECUTABLE call instead of setting the
+# OBJECT_DEPENDS property in LLIntOffsetsExtractor.cpp because generate_offset_extractor.rb and
+# generate-bytecode-files may not regenerate the .h files when the hash they calculate does not change.
+# In this case, if some of the dependencies specified in the ADD_CUSTOM_COMMANDs above have
+# changed, the command will always be called because the mtime of the .h files will
+# always be older than that of their dependencies.
+# Additionally, setting the OBJECT_DEPENDS property will make the .h files a Makefile
+# dependency of both LLIntOffsetsExtractor and LLIntOffsetsExtractor.cpp, so the command will
+# actually be run multiple times!
+add_executable(LLIntOffsetsExtractor
+    ${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h
+)
+target_link_libraries(LLIntOffsetsExtractor WTF)
+
+# The build system will execute asm.rb every time LLIntOffsetsExtractor's mtime is newer than
+# LLIntAssembly.h's mtime. The problem we have here is that asm.rb has a built-in optimization:
+# it generates a checksum of the LLIntOffsetsExtractor binary, and if the checksum of the new
+# LLIntOffsetsExtractor matches, no output is generated. To make this target consistent and avoid
+# running this command for every build, we artificially update LLIntAssembly.h's mtime (using touch)
+# after every asm.rb run.
+if (MSVC)
+    set(LLIntOutput LowLevelInterpreterWin.asm)
+    set(OFFLINE_ASM_ARGS --assembler=MASM)
+else ()
+    set(LLIntOutput LLIntAssembly.h)
+endif ()
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${LLIntOutput}
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/offlineasm/asm.rb
+    DEPENDS LLIntOffsetsExtractor ${LLINT_ASM} ${OFFLINE_ASM} ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm
+    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/offlineasm/asm.rb -I${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/ ${JAVASCRIPTCORE_DIR}/llint/LowLevelInterpreter.asm $<TARGET_FILE:LLIntOffsetsExtractor> ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${LLIntOutput} ${OFFLINE_ASM_ARGS}
+    COMMAND ${CMAKE_COMMAND} -E touch_nocreate ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${LLIntOutput}
+    WORKING_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+    VERBATIM)
+
+# The explanation for not making LLIntAssembly.h part of the OBJECT_DEPENDS property of some of
+# the .cpp files below is similar to the one in the previous comment. However, since these .cpp
+# files are used to build JavaScriptCore itself, we can just add LLIntAssembly.h to JSC_HEADERS
+# since it is used in the add_library() call at the end of this file.
+if (MSVC)
+    enable_language(ASM_MASM)
+    list(APPEND JavaScriptCore_SOURCES
+        ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LowLevelInterpreterWin.asm
+    )
+    # Win32 needs /safeseh with assembly, but Win64 does not.
+    if (CMAKE_SIZEOF_VOID_P EQUAL 4)
+        set_source_files_properties(${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LowLevelInterpreterWin.asm
+            PROPERTIES COMPILE_FLAGS  "/safeseh"
+        )
+    endif ()
+else ()
+    list(APPEND JavaScriptCore_HEADERS
+        ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntAssembly.h
+    )
+endif ()
+
+# WebAssembly generator
+
+macro(GENERATE_PYTHON _generator _input _output)
+    add_custom_command(
+        OUTPUT ${_output}
+        MAIN_DEPENDENCY ${_generator}
+        DEPENDS ${_input}
+        COMMAND ${PYTHON_EXECUTABLE} ${_generator} ${_input} ${_output}
+        VERBATIM)
+    list(APPEND JavaScriptCore_HEADERS ${_output})
+    ADD_SOURCE_DEPENDENCIES(${_input} ${_output})
+endmacro()
+GENERATE_PYTHON(${CMAKE_CURRENT_SOURCE_DIR}/wasm/generateWasmOpsHeader.py ${CMAKE_CURRENT_SOURCE_DIR}/wasm/wasm.json ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/WasmOps.h)
+GENERATE_PYTHON(${CMAKE_CURRENT_SOURCE_DIR}/wasm/generateWasmValidateInlinesHeader.py ${CMAKE_CURRENT_SOURCE_DIR}/wasm/wasm.json ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/WasmValidateInlines.h)
+GENERATE_PYTHON(${CMAKE_CURRENT_SOURCE_DIR}/wasm/generateWasmB3IRGeneratorInlinesHeader.py ${CMAKE_CURRENT_SOURCE_DIR}/wasm/wasm.json ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/WasmB3IRGeneratorInlines.h)
+
+# LUT generator
+
+set(HASH_LUT_GENERATOR ${CMAKE_CURRENT_SOURCE_DIR}/create_hash_table)
+macro(GENERATE_HASH_LUT _input _output)
+    add_custom_command(
+        OUTPUT ${_output}
+        MAIN_DEPENDENCY ${HASH_LUT_GENERATOR}
+        DEPENDS ${_input}
+        COMMAND ${PERL_EXECUTABLE} ${HASH_LUT_GENERATOR} ${_input} > ${_output}
+        VERBATIM)
+    list(APPEND JavaScriptCore_HEADERS ${_output})
+    ADD_SOURCE_DEPENDENCIES(${_input} ${_output})
+endmacro()
+
+# GENERATOR 1-A: LUT creator
+
+foreach (_file ${JavaScriptCore_OBJECT_LUT_SOURCES})
+    get_filename_component(_name ${_file} NAME_WE)
+    GENERATE_HASH_LUT(${CMAKE_CURRENT_SOURCE_DIR}/${_file} ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/${_name}.lut.h)
+endforeach ()
+
+set(JavaScriptCore_FORWARDING_HEADERS_DIRECTORIES
+    API
+    assembler
+    bindings
+    builtins
+    bytecode
+    debugger
+    dfg
+    disassembler
+    domjit
+    heap
+    inspector
+    interpreter
+    jit
+    llint
+    parser
+    profiler
+    replay
+    runtime
+    yarr
+
+    collector/handles
+
+    inspector/agents
+    inspector/augmentable
+    inspector/remote
+)
+
+# GENERATOR 1-B: particular LUT creator (for 1 file only)
+GENERATE_HASH_LUT(${CMAKE_CURRENT_SOURCE_DIR}/parser/Keywords.table ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Lexer.lut.h)
+
+# GENERATOR: "RegExpJitTables.h": tables used by Yarr
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/RegExpJitTables.h
+    MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/create_regex_tables
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/create_regex_tables > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/RegExpJitTables.h
+    VERBATIM)
+ADD_SOURCE_DEPENDENCIES(${CMAKE_CURRENT_SOURCE_DIR}/yarr/YarrPattern.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/RegExpJitTables.h)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/YarrCanonicalizeUnicode.cpp
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generateYarrCanonicalizeUnicode
+    DEPENDS ${JAVASCRIPTCORE_DIR}/ucd/CaseFolding.txt
+    COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generateYarrCanonicalizeUnicode ${JAVASCRIPTCORE_DIR}/ucd/CaseFolding.txt ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/YarrCanonicalizeUnicode.cpp
+    VERBATIM)
+
+list(APPEND JavaScriptCore_SOURCES
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/YarrCanonicalizeUnicode.cpp
+)
+
+# GENERATOR: "KeywordLookup.h": keyword decision tree used by the lexer
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/KeywordLookup.h
+    MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/KeywordLookupGenerator.py
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/parser/Keywords.table
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/KeywordLookupGenerator.py ${CMAKE_CURRENT_SOURCE_DIR}/parser/Keywords.table > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/KeywordLookup.h
+    VERBATIM)
+ADD_SOURCE_DEPENDENCIES(${CMAKE_CURRENT_SOURCE_DIR}/parser/Lexer.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/KeywordLookup.h)
+
+
+# Inspector Interfaces
+
+set(JavaScriptCore_INSPECTOR_SCRIPTS_DIR "${JAVASCRIPTCORE_DIR}/inspector/scripts")
+
+set(JavaScriptCore_INSPECTOR_PROTOCOL_SCRIPTS
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/generate-inspector-protocol-bindings.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/cpp_generator.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/cpp_generator_templates.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_js_backend_commands.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_backend_dispatcher_header.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_backend_dispatcher_implementation.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_frontend_dispatcher_header.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_frontend_dispatcher_implementation.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_protocol_types_header.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generate_cpp_protocol_types_implementation.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generator.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/generator_templates.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/__init__.py
+    ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/codegen/models.py
+)
+
+set(JavaScriptCore_INSPECTOR_DOMAINS
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/ApplicationCache.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/CSS.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Console.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/DOM.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/DOMDebugger.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/DOMStorage.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Database.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Debugger.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/GenericTypes.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Heap.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Inspector.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/LayerTree.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Network.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/OverlayTypes.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Page.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Runtime.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/ScriptProfiler.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Timeline.json
+    ${JAVASCRIPTCORE_DIR}/inspector/protocol/Worker.json
+)
+
+if (ENABLE_INDEXED_DATABASE)
+    list(APPEND JavaScriptCore_INSPECTOR_DOMAINS
+        ${JAVASCRIPTCORE_DIR}/inspector/protocol/IndexedDB.json
+    )
+endif ()
+
+if (ENABLE_RESOURCE_USAGE)
+    list(APPEND JavaScriptCore_INSPECTOR_DOMAINS
+        ${JAVASCRIPTCORE_DIR}/inspector/protocol/Memory.json
+    )
+endif ()
+
+if (ENABLE_WEB_REPLAY)
+    list(APPEND JavaScriptCore_INSPECTOR_DOMAINS
+        ${JAVASCRIPTCORE_DIR}/inspector/protocol/Replay.json
+    )
+endif ()
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    MAIN_DEPENDENCY ${JavaScriptCore_SCRIPTS_DIR}/generate-combined-inspector-json.py
+    DEPENDS ${JavaScriptCore_INSPECTOR_DOMAINS}
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/generate-combined-inspector-json.py ${JavaScriptCore_INSPECTOR_DOMAINS} > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    VERBATIM)
+
+# Inspector Backend Dispatchers, Frontend Dispatchers, Type Builders
+file(MAKE_DIRECTORY ${DERIVED_SOURCES_WEBINSPECTORUI_DIR}/UserInterface/Protocol)
+file(MAKE_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector)
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.cpp
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.h
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.cpp
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.h
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.cpp
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.h
+           ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendCommands.js
+    MAIN_DEPENDENCY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    DEPENDS ${JavaScriptCore_INSPECTOR_PROTOCOL_SCRIPTS}
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_INSPECTOR_SCRIPTS_DIR}/generate-inspector-protocol-bindings.py --outputDir "${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector" --framework JavaScriptCore ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/CombinedDomains.json
+    VERBATIM)
+
+# JSCBuiltins
+
+set(BUILTINS_GENERATOR_SCRIPTS
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generator.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_model.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_templates.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_combined_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_combined_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_separate_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_separate_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_wrapper_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_wrapper_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_internals_wrapper_header.py
+    ${JavaScriptCore_SCRIPTS_DIR}/builtins_generate_internals_wrapper_implementation.py
+    ${JavaScriptCore_SCRIPTS_DIR}/generate-js-builtins.py
+    ${JavaScriptCore_SCRIPTS_DIR}/lazywriter.py
+)
+
+set(JavaScriptCore_BUILTINS_SOURCES
+    ${JAVASCRIPTCORE_DIR}/builtins/ArrayConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ArrayIteratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ArrayPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/AsyncFunctionPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/DatePrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/FunctionPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/GeneratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/GlobalObject.js
+    ${JAVASCRIPTCORE_DIR}/builtins/GlobalOperations.js
+    ${JAVASCRIPTCORE_DIR}/builtins/InspectorInstrumentationObject.js
+    ${JAVASCRIPTCORE_DIR}/builtins/InternalPromiseConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/IteratorHelpers.js
+    ${JAVASCRIPTCORE_DIR}/builtins/IteratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/MapPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ModuleLoaderPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/NumberConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/NumberPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ObjectConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/PromiseConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/PromiseOperations.js
+    ${JAVASCRIPTCORE_DIR}/builtins/PromisePrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/ReflectObject.js
+    ${JAVASCRIPTCORE_DIR}/builtins/RegExpPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/SetPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/StringConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/StringIteratorPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/StringPrototype.js
+    ${JAVASCRIPTCORE_DIR}/builtins/TypedArrayConstructor.js
+    ${JAVASCRIPTCORE_DIR}/builtins/TypedArrayPrototype.js
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcode.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcodeGenerated.h
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/b3/air/AirOpcode.opcodes
+    DEPENDS ${JAVASCRIPTCORE_DIR}/b3/air/opcode_generator.rb
+    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/b3/air/opcode_generator.rb ${JAVASCRIPTCORE_DIR}/b3/air/AirOpcode.opcodes VERBATIM
+    WORKING_DIRECTORY ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}
+)
+
+list(APPEND JavaScriptCore_SOURCES
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcode.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/AirOpcodeGenerated.h
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.h
+    MAIN_DEPENDENCY ${JavaScriptCore_SCRIPTS_DIR}/generate-js-builtins.py
+    DEPENDS ${JavaScriptCore_BUILTINS_SOURCES} ${BUILTINS_GENERATOR_SCRIPTS}
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/generate-js-builtins.py --framework JavaScriptCore --output-directory ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR} --combined ${JavaScriptCore_BUILTINS_SOURCES}
+    VERBATIM)
+
+list(APPEND JavaScriptCore_SOURCES
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.cpp
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.cpp
+)
+
+list(APPEND JavaScriptCore_HEADERS
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorBackendDispatchers.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorFrontendDispatchers.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/inspector/InspectorProtocolObjects.h
+    ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSCBuiltins.h
+)
+
+add_custom_command(
+    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js
+    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/inspector/InjectedScriptSource.js
+    DEPENDS ${JavaScriptCore_SCRIPTS_DIR}/xxd.pl ${JavaScriptCore_SCRIPTS_DIR}/jsmin.py
+    COMMAND ${CMAKE_COMMAND} -E echo "//# sourceURL=__InjectedScript_InjectedScriptSource.js" > ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js
+    COMMAND ${PYTHON_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/jsmin.py < ${JAVASCRIPTCORE_DIR}/inspector/InjectedScriptSource.js >> ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js
+    COMMAND ${PERL_EXECUTABLE} ${JavaScriptCore_SCRIPTS_DIR}/xxd.pl InjectedScriptSource_js ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.min.js ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.h
+    VERBATIM)
+
+list(APPEND JavaScriptCore_HEADERS ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InjectedScriptSource.h)
+
+# Web Replay inputs generator
+if (ENABLE_WEB_REPLAY)
+    set(JavaScript_WEB_REPLAY_INPUTS ${CMAKE_CURRENT_SOURCE_DIR}/replay/JSInputs.json)
+    add_custom_command(
+        OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.cpp
+        MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/replay/scripts/CodeGeneratorReplayInputs.py
+        DEPENDS ${JavaScript_WEB_REPLAY_INPUTS}
+        COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/replay/scripts/CodeGeneratorReplayInputs.py --outputDir ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/ --framework JavaScriptCore ${JavaScript_WEB_REPLAY_INPUTS}
+        VERBATIM)
+
+    list(APPEND JavaScriptCore_SOURCES
+        replay/EncodedValue.cpp
+        ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.cpp
+    )
+    list(APPEND JavaScriptCore_HEADERS ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/JSReplayInputs.h)
+endif ()
+
+if (WTF_CPU_ARM)
+elseif (WTF_CPU_ARM64)
+elseif (WTF_CPU_HPPA)
+elseif (WTF_CPU_PPC)
+elseif (WTF_CPU_PPC64)
+elseif (WTF_CPU_PPC64LE)
+elseif (WTF_CPU_S390)
+elseif (WTF_CPU_S390X)
+elseif (WTF_CPU_MIPS)
+elseif (WTF_CPU_SH4)
+elseif (WTF_CPU_X86)
+elseif (WTF_CPU_X86_64)
+    if (MSVC AND ENABLE_JIT)
+        add_custom_command(
+            OUTPUT ${DERIVED_SOURCES_DIR}/JITStubsMSVC64.obj
+            MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/jit/JITStubsMSVC64.asm
+            COMMAND ml64 -nologo -c -Fo ${DERIVED_SOURCES_DIR}/JITStubsMSVC64.obj ${JAVASCRIPTCORE_DIR}/jit/JITStubsMSVC64.asm
+            VERBATIM)
+
+        list(APPEND JavaScriptCore_SOURCES ${DERIVED_SOURCES_DIR}/JITStubsMSVC64.obj)
+    endif ()
+else ()
+    message(FATAL_ERROR "Unknown CPU")
+endif ()
+
+
+WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
+
+WEBKIT_CREATE_FORWARDING_HEADERS(JavaScriptCore DIRECTORIES ${JavaScriptCore_FORWARDING_HEADERS_DIRECTORIES} FILES ${JavaScriptCore_FORWARDING_HEADERS_FILES})
+
+target_include_directories(LLIntOffsetsExtractor PRIVATE ${JavaScriptCore_INCLUDE_DIRECTORIES})
+
+add_subdirectory(shell)
+
+WEBKIT_WRAP_SOURCELIST(${JavaScriptCore_SOURCES})
+WEBKIT_FRAMEWORK(JavaScriptCore)
+
+if (NOT "${PORT}" STREQUAL "Mac")
+    if (${JavaScriptCore_LIBRARY_TYPE} STREQUAL "SHARED")
+        POPULATE_LIBRARY_VERSION(JAVASCRIPTCORE)
+        set_target_properties(JavaScriptCore PROPERTIES VERSION ${JAVASCRIPTCORE_VERSION} SOVERSION ${JAVASCRIPTCORE_VERSION_MAJOR})
+        install(TARGETS JavaScriptCore DESTINATION "${LIB_INSTALL_DIR}")
+    endif ()
+endif ()
+
+# Force staging of shared scripts, even if they aren't directly used to build JavaScriptCore.
+
+add_custom_target(stageSharedScripts DEPENDS ${JavaScriptCore_SCRIPTS})
+add_dependencies(JavaScriptCore stageSharedScripts)
+
+if (MSVC)
+    add_custom_command(
+        TARGET JavaScriptCore
+        PRE_BUILD
+        COMMAND ${PERL_EXECUTABLE} ${WEBKIT_LIBRARIES_DIR}/tools/scripts/auto-version.pl ${DERIVED_SOURCES_DIR}
+        VERBATIM)
+
+    add_custom_command(
+        TARGET JavaScriptCore
+        POST_BUILD
+        COMMAND ${PERL_EXECUTABLE} ${WEBKIT_LIBRARIES_DIR}/tools/scripts/version-stamp.pl ${DERIVED_SOURCES_DIR} $<TARGET_FILE:JavaScriptCore>
+        VERBATIM)
+endif ()
diff --git a/COPYING.LIB b/COPYING.LIB
new file mode 100644
index 0000000..87c4a33
--- /dev/null
+++ b/COPYING.LIB
@@ -0,0 +1,488 @@
+
+
+NOTE! The LGPL below is copyrighted by the Free Software Foundation, but 
+the instance of code that it refers to (the kde libraries) are copyrighted 
+by the authors who actually wrote it.
+				   
+---------------------------------------------------------------------------
+		  GNU LIBRARY GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+                    51 Franklin Street, Fifth Floor
+                    Boston, MA 02110-1301, USA.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL.  It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it.  You can use it for
+your libraries, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library.  If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software.  To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+  Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs.  This
+license, the GNU Library General Public License, applies to certain
+designated libraries.  This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+  The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it.  Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program.  However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+  Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries.  We
+concluded that weaker conditions might promote sharing better.
+
+  However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves.  This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them.  (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.)  The hope is that this
+will lead to faster development of free libraries.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+  Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+		  GNU LIBRARY GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License").  Each licensee is
+addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    c) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    d) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..6f8959c
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,26467 @@
+2017-02-23  Filip Pizlo  
+
+        verifyEdges should not run in release
+        
+
+        Reviewed by Keith Miller and Mark Lam.
+
+        * dfg/DFGAbstractInterpreterInlines.h:
+        (JSC::DFG::AbstractInterpreter::executeEffects):
+
+2017-02-23  Filip Pizlo  
+
+        Disable concurrent GC A:B testing.
+
+        * runtime/Options.h:
+
+2017-02-17  JF Bastien  
+
+        A/B test concurrent GC
+        https://bugs.webkit.org/show_bug.cgi?id=168453
+        
+
+        Reviewed by Phil Pizlo.
+
+        The concurrent GC may be causing more crashes, but it's hard to
+        tell. A/B test this by setting it off for 50% of users, based on
+        the UUID which crash tracer generates. From this we can look at
+        crashes and figure out whether the concurrent GC was on or off,
+        and derive whether fewer crashes occur when it's off.
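+
+        As a rough sketch of the mechanism (illustrative only; the real change
+        lives in Options.cpp/Options.h, and the helper name and hashing choice
+        below are hypothetical), the UUID is reduced to a stable 50% bucket
+        that then gates the concurrent GC option:
+
+        ```
+        #include <functional>
+        #include <string>
+
+        // Hypothetical helper: derive a deterministic bucket from the
+        // crash tracer's UUID string.
+        static bool concurrentGCAllowedForUUID(const std::string& uuid)
+        {
+            // Any stable string hash works; std::hash is used purely for
+            // illustration here.
+            return (std::hash<std::string>{}(uuid) & 1) == 0; // ~50% stay on
+        }
+        ```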
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+        * runtime/Options.h:
+
+2017-02-21  Matthew Hanson  
+
+        Merge r212692. rdar://problem/30475767
+
+    2017-02-20  Mark Lam  
+
+            [Re-landing] CachedCall should let GC know to keep its arguments alive.
+            https://bugs.webkit.org/show_bug.cgi?id=168567
+            
+
+            Reviewed by Saam Barati.
+
+            We fix this by having CachedCall use a MarkedArgumentBuffer to store its
+            arguments instead of a Vector.
+
+            Also declared CachedCall, MarkedArgumentBuffer, and ProtoCallFrame as
+            WTF_FORBID_HEAP_ALLOCATION because they rely on being stack allocated for
+            correctness.
+
+            Update: the original patch has a bug in MarkedArgumentBuffer::expandCapacity()
+            where it was copying and calling addMarkSet() on values in m_buffer beyond m_size
+            (up to m_capacity).  As a result, depending on the pre-existing values in
+            m_inlineBuffer, this may result in a computed Heap pointer that is wrong and
+            may subsequently manifest as a crash.  This is likely to be the cause of the PLT
+            regression.
+
+            I don't have a new test for this fix because the issue relies on sufficiently bad
+            values randomly showing up in m_inlineBuffer when we do an ensureCapacity() which
+            calls expandCapacity().
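+
+            As an aside for intuition (a conceptual sketch only; the struct and
+            field names below are illustrative, not the real JSC classes), the
+            reason an argument buffer of this kind keeps its values alive while
+            a plain Vector does not is that the buffer registers itself with the
+            collector for as long as it lives on the stack:
+
+            ```
+            #include <vector>
+
+            struct Value { void* cell = nullptr; }; // stand-in for a JS value
+
+            struct ArgumentBuffer;
+
+            struct GCRootList {
+                std::vector<ArgumentBuffer*> buffers; // scanned on every GC
+            };
+
+            struct ArgumentBuffer {
+                explicit ArgumentBuffer(GCRootList& roots) : m_roots(roots)
+                {
+                    m_roots.buffers.push_back(this); // now visible to the collector
+                }
+                ~ArgumentBuffer()
+                {
+                    m_roots.buffers.pop_back(); // stack-allocated, so LIFO is safe
+                }
+                void append(Value v) { m_values.push_back(v); }
+
+                GCRootList& m_roots;
+                std::vector<Value> m_values; // marked while registered
+            };
+            ```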
+
+            * interpreter/CachedCall.h:
+            (JSC::CachedCall::CachedCall):
+            (JSC::CachedCall::call):
+            (JSC::CachedCall::clearArguments):
+            (JSC::CachedCall::appendArgument):
+            (JSC::CachedCall::setArgument): Deleted.
+            * interpreter/CallFrame.h:
+            (JSC::ExecState::emptyList):
+            * interpreter/Interpreter.cpp:
+            (JSC::Interpreter::prepareForRepeatCall):
+            * interpreter/Interpreter.h:
+            * interpreter/ProtoCallFrame.h:
+            * runtime/ArgList.cpp:
+            (JSC::MarkedArgumentBuffer::slowEnsureCapacity):
+            (JSC::MarkedArgumentBuffer::expandCapacity):
+            (JSC::MarkedArgumentBuffer::slowAppend):
+            * runtime/ArgList.h:
+            (JSC::MarkedArgumentBuffer::append):
+            (JSC::MarkedArgumentBuffer::ensureCapacity):
+            * runtime/StringPrototype.cpp:
+            (JSC::replaceUsingRegExpSearch):
+            * runtime/VM.cpp:
+            (JSC::VM::VM):
+            * runtime/VM.h:
+
+2017-02-20  Matthew Hanson  
+
+        Rollout r212660. rdar://problem/30553220
+
+2017-02-20  Matthew Hanson  
+
+        Rollout r212646. rdar://problem/30475767
+
+2017-02-17  Matthew Hanson  
+
+        A/B test concurrent GC
+        https://bugs.webkit.org/show_bug.cgi?id=168453
+        
+
+        Landed on behalf of JF Bastien.
+
+        Reviewed by Alexey Proskuryakov.
+
+        The concurrent GC may be causing more crashes, but it's hard to
+        tell. A/B test this by setting it off for 50% of users, based on
+        the UUID which crash tracer generates. From this we can look at
+        crashes and figure out whether the concurrent GC was on or off,
+        and derive whether fewer crashes occur when it's off.
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+        * runtime/Options.h:
+
+2017-02-20  Matthew Hanson  
+
+        Merge r212618. rdar://problem/30475767
+
+    2017-02-19  Mark Lam  
+
+            CachedCall should let GC know to keep its arguments alive.
+            https://bugs.webkit.org/show_bug.cgi?id=168567
+            
+
+            Reviewed by Saam Barati.
+
+            We fix this by having CachedCall use a MarkedArgumentBuffer to store its
+            arguments instead of a Vector.
+
+            Also declared CachedCall, MarkedArgumentBuffer, and ProtoCallFrame as
+            WTF_FORBID_HEAP_ALLOCATION because they rely on being stack allocated for
+            correctness.
+
+            * interpreter/CachedCall.h:
+            (JSC::CachedCall::CachedCall):
+            (JSC::CachedCall::call):
+            (JSC::CachedCall::clearArguments):
+            (JSC::CachedCall::appendArgument):
+            (JSC::CachedCall::setArgument): Deleted.
+            * interpreter/CallFrame.h:
+            (JSC::ExecState::emptyList):
+            * interpreter/Interpreter.cpp:
+            (JSC::Interpreter::prepareForRepeatCall):
+            * interpreter/Interpreter.h:
+            * interpreter/ProtoCallFrame.h:
+            * runtime/ArgList.cpp:
+            (JSC::MarkedArgumentBuffer::expandCapacity):
+            * runtime/ArgList.h:
+            (JSC::MarkedArgumentBuffer::ensureCapacity):
+            * runtime/StringPrototype.cpp:
+            (JSC::replaceUsingRegExpSearch):
+            * runtime/VM.cpp:
+            (JSC::VM::VM):
+            * runtime/VM.h:
+
+2017-02-17  Matthew Hanson  
+
+        Merge r212177. rdar://problem/30205880
+
+    2017-02-10  Saam Barati  
+
+            Object allocation sinking phase doesn't properly handle control flow when emitting a PutHint of a materialized object into a PromotedHeapLocation of a still sunken object
+            https://bugs.webkit.org/show_bug.cgi?id=168140
+            
+
+            Reviewed by Filip Pizlo.
+
+            This patch fixes a bug in allocation sinking phase where
+            we don't properly handle control flow when materializing
+            an object and also PutHinting that materialization into
+            a still sunken object. We were performing the PutHint
+            for the materialization at the point of materialization,
+            however, we may have materialized along both edges
+            of a control flow diamond, in which case, we need to
+            also PutHint at the join point. Consider this program:
+
+            ```
+            bb#0:
+            b: PhantomActivation()
+            a: PhantomNewFunction()
+            c: PutHint(@a, @b, ActivationLoc)
+            Branch(#1, #2)
+
+            bb#1:
+            d: MaterializeActivation()
+            e: PutHint(@a, @d, ActivationLoc)
+            f: Upsilon(@d, ^p)
+            Jump(#3)
+
+            bb#2:
+            g: MaterializeActivation()
+            h: PutHint(@a, @g, ActivationLoc)
+            i: Upsilon(@g, ^p)
+            Jump(#3)
+
+            bb#3:
+            p: Phi()
+            // What is PromotedHeapLocation(@a, ActivationLoc) here?
+            // What would we do if we exited?
+            ```
+            Before this patch, we didn't perform a PutHint for the Phi.
+            However, we need to; otherwise, when we exit, we won't know
+            the value of PromotedHeapLocation(@a, ActivationLoc).
+
+            The program we need then, for correctness, is this:
+            ```
+            bb#0:
+            b: PhantomActivation()
+            a: PhantomNewFunction()
+            c: PutHint(@a, @b, ActivationLoc)
+            Branch(#1, #2)
+
+            bb#1:
+            d: MaterializeActivation()
+            e: PutHint(@a, @d, ActivationLoc)
+            f: Upsilon(@d, ^p)
+            Jump(#3)
+
+            bb#2:
+            g: MaterializeActivation()
+            h: PutHint(@a, @g, ActivationLoc)
+            i: Upsilon(@g, ^p)
+            Jump(#3)
+
+            bb#3:
+            p: Phi()
+            j: PutHint(@a, @p, ActivationLoc)
+            ```
+
+            This patch makes it so that we emit the necessary PutHint at node `j`.
+            I've also added more validation to the OSRAvailabilityAnalysisPhase
+            to catch this problem during validation.
+
+            * dfg/DFGOSRAvailabilityAnalysisPhase.cpp:
+            (JSC::DFG::OSRAvailabilityAnalysisPhase::run):
+            * dfg/DFGObjectAllocationSinkingPhase.cpp:
+            * ftl/FTLOperations.cpp:
+            (JSC::FTL::operationMaterializeObjectInOSR):
+
+2017-02-17  Matthew Hanson  
+
+        Merge r212035. rdar://problem/30433204
+
+    2017-02-09  Filip Pizlo  
+
+            SharedArrayBuffer does not need to be in the transfer list
+            https://bugs.webkit.org/show_bug.cgi?id=168079
+
+            Reviewed by Geoffrey Garen and Keith Miller.
+
+            Exposes a simple shareWith() API for when you know you want to share the contents of
+            a shared buffer. Also a useful explicit operator bool.
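+
+            A self-contained sketch of both ideas (stand-in types, not the JSC
+            classes; refcounting the backing store stands in for the real
+            shared-memory plumbing):
+
+            ```
+            #include <cstddef>
+            #include <memory>
+
+            struct SharedContentsSketch {
+                std::shared_ptr<char[]> data;
+                std::size_t sizeInBytes { 0 };
+
+                explicit operator bool() const { return static_cast<bool>(data); }
+
+                // Share, don't transfer: afterwards both objects reference the same
+                // memory, which is why the buffer needn't appear in a transfer list.
+                void shareWith(SharedContentsSketch& other) const
+                {
+                    other.data = data;
+                    other.sizeInBytes = sizeInBytes;
+                }
+            };
+            ```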
+
+            * runtime/ArrayBuffer.cpp:
+            (JSC::ArrayBuffer::shareWith):
+            * runtime/ArrayBuffer.h:
+            (JSC::ArrayBufferContents::operator bool):
+
+2017-02-17  Matthew Hanson  
+
+        Merge r212146. rdar://problem/28656664
+
+    2017-02-10  Mark Lam  
+
+            StructureStubInfo::considerCaching() should write barrier its owner CodeBlock when buffering a new Structure.
+            https://bugs.webkit.org/show_bug.cgi?id=168137
+            
+
+            Reviewed by Filip Pizlo.
+
+            If we're adding a new structure to StructureStubInfo's bufferedStructures, we
+            should write barrier the StubInfo's owner CodeBlock because that structure may be
+            collected during the next GC.  Write barrier-ing the owner CodeBlock ensures that
+            CodeBlock::finalizeBaselineJITInlineCaches() is called on it during the GC,
+            which, in turn, gives the StructureStubInfo the opportunity to filter out the
+            dead structure.
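+
+            As a hedged sketch of that rule (stand-in types; heap.writeBarrier(owner)
+            is assumed to be the "remember this cell" entry point referred to above):
+
+            ```
+            #include <set>
+
+            struct Structure;
+            struct CodeBlock;
+            struct Heap { void writeBarrier(CodeBlock*) { } };
+
+            bool considerCachingSketch(Heap& heap, CodeBlock* owner,
+                                       std::set<Structure*>& buffered, Structure* structure)
+            {
+                if (!buffered.insert(structure).second)
+                    return false; // already buffered; nothing new to remember
+                // A newly buffered structure may be collected during the next GC, so
+                // barrier the owner CodeBlock; the GC then revisits it and the stub
+                // gets a chance to drop structures that died.
+                heap.writeBarrier(owner);
+                return true;
+            }
+            ```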
+
+            * bytecode/StructureStubInfo.h:
+            (JSC::StructureStubInfo::considerCaching):
+            * jit/JITOperations.cpp:
+
+2017-02-16  Keith Miller  
+
+        Fix merge issue with r212085 (rdar://problem/29939864).
+
+        * runtime/JSFunction.cpp:
+        (JSC::JSFunction::callerGetter):
+
+2017-02-12  Babak Shafiei  
+
+        Merge r211609.
+
+    2017-02-02  Mark Lam  
+
+            Add a SIGILL crash analyzer to make debugging SIGILLs easier.
+            https://bugs.webkit.org/show_bug.cgi?id=167714
+            
+
+            Not reviewed.
+
+            Build fix for CLOOP build.
+
+            * tools/VMInspector.cpp:
+
+2017-02-09  Matthew Hanson  
+
+        Merge r212022. rdar://problem/30198083
+
+    2017-02-09  Mark Lam  
+
+            B3::Procedure::deleteOrphans() should neutralize upsilons with dead phis.
+            https://bugs.webkit.org/show_bug.cgi?id=167437
+            
+
+            Reviewed by Filip Pizlo.
+
+            * b3/B3Procedure.cpp:
+            (JSC::B3::Procedure::deleteOrphans):
+
+2017-02-09  Matthew Hanson  
+
+        Merge r212021. rdar://problem/30149432
+
+    2017-02-09  Saam Barati  
+
+            Sloppy mode: We don't properly hoist functions names "arguments" when we have a non-simple parameter list
+            https://bugs.webkit.org/show_bug.cgi?id=167319
+            
+
+            Reviewed by Mark Lam.
+
+            When hoisting a function inside sloppy mode, we were assuming all "var"s are inside
+            what we call the "var" SymbolTableEntry. This was almost true, except for "arguments",
+            which has sufficiently weird behavior. "arguments" can be visible to the default
+            parameter expressions inside a function, and therefore can't go inside the "var"
+            SymbolTableEntry since the parameter SymbolTableEntry comes before the "var"
+            SymbolTableEntry in the scope chain.  Therefore, if we hoist a function named
+            "arguments", then we must also look for that variable inside the parameter scope
+            stack entry.
+
+            * bytecompiler/BytecodeGenerator.cpp:
+            (JSC::BytecodeGenerator::hoistSloppyModeFunctionIfNecessary):
+
+2017-02-09  Matthew Hanson  
+
+        Merge r212019. rdar://problem/30128133
+
+    2017-02-09  Mark Lam  
+
+            Fix max length check in ArrayPrototype.js' concatSlowPath().
+            https://bugs.webkit.org/show_bug.cgi?id=167270
+            
+
+            Reviewed by Filip Pizlo.
+
+            1. Fixed concatSlowPath() to ensure that the result array length does not exceed
+               @MAX_ARRAY_INDEX.  The old code was checking against @MAX_SAFE_INTEGER in some
+               cases, but this is overly permissive.
+
+            2. Changed concatSlowPath() to throw a RangeError instead of a TypeError to be
+               consistent with the C++ runtime functions in JSArray.cpp.
+
+            3. Changed the RangeError message in concatSlowPath() and JSArray.cpp to "Length
+               exceeded the maximum array length" when the error is that the result length
+               exceeds MAX_ARRAY_INDEX.  We do this for 2 reasons:
+               a. "Length exceeded the maximum array length" is more informative than
+                  "Invalid array length".
+               b. We want to use the same string consistently for the same error.
+
+               There are still 2 places in JSArray.cpp that throw a RangeError with
+               message "Invalid array length".  In those cases, the error is not necessarily
+               due to the result length exceeding MAX_ARRAY_INDEX, but is due to attempting to
+               set a length value that is not an integer that fits in MAX_ARRAY_INDEX e.g.
+               an attempt to set a fractional length value.  Hence, "Invalid array length" is
+               appropriate for those cases.
+
+            4. Fixed JSArray::appendMemcpy() to handle overflows when computing the result
+               array length (see the sketch after this list).
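+
+            A self-contained sketch of the length check from points 1 and 4 (the names
+            and the exception are stand-ins; JSC throws a JS RangeError rather than a
+            C++ exception, and MAX_ARRAY_INDEX is 2^32 - 2):
+
+            ```
+            #include <cstdint>
+            #include <stdexcept>
+
+            static const uint64_t maxArrayIndex = 4294967294ull; // 2^32 - 2
+
+            uint64_t checkedResultLength(uint32_t firstLength, uint32_t secondLength)
+            {
+                // Add in 64 bits so the sum cannot wrap, then compare against the
+                // array-length limit rather than the far larger MAX_SAFE_INTEGER.
+                uint64_t resultLength = uint64_t(firstLength) + uint64_t(secondLength);
+                if (resultLength > maxArrayIndex)
+                    throw std::range_error("Length exceeded the maximum array length");
+                return resultLength;
+            }
+            ```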
+
+            * builtins/ArrayPrototype.js:
+            (concatSlowPath):
+            * bytecode/BytecodeIntrinsicRegistry.cpp:
+            (JSC::BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry):
+            * bytecode/BytecodeIntrinsicRegistry.h:
+            * runtime/ArrayPrototype.cpp:
+            (JSC::concatAppendOne):
+            (JSC::arrayProtoPrivateFuncAppendMemcpy):
+            * runtime/JSArray.cpp:
+            (JSC::JSArray::appendMemcpy):
+            (JSC::JSArray::push):
+
+2017-02-09  Matthew Hanson  
+
+        Merge r212015. rdar://problem/30054759
+
+    2017-02-09  Mark Lam  
+
+            Constructed object's global object should be the global object of the constructor.
+            https://bugs.webkit.org/show_bug.cgi?id=167121
+            
+
+            Reviewed by Filip Pizlo and Geoffrey Garen.
+
+            The realm (i.e. globalObject) of any object should be the same as the constructor
+            that instantiated the object.  Changed PrototypeMap::createEmptyStructure() to
+            be passed the correct globalObject to use instead of assuming it's the same one
+            as the prototype object.
+
+            * bytecode/CodeBlock.cpp:
+            (JSC::CodeBlock::finishCreation):
+            * bytecode/InternalFunctionAllocationProfile.h:
+            (JSC::InternalFunctionAllocationProfile::createAllocationStructureFromBase):
+            * bytecode/ObjectAllocationProfile.h:
+            (JSC::ObjectAllocationProfile::initialize):
+            * runtime/FunctionRareData.cpp:
+            (JSC::FunctionRareData::initializeObjectAllocationProfile):
+            * runtime/FunctionRareData.h:
+            (JSC::FunctionRareData::createInternalFunctionAllocationStructureFromBase):
+            * runtime/InternalFunction.cpp:
+            (JSC::InternalFunction::createSubclassStructure):
+            * runtime/IteratorOperations.cpp:
+            (JSC::createIteratorResultObjectStructure):
+            * runtime/JSBoundFunction.cpp:
+            (JSC::getBoundFunctionStructure):
+            * runtime/JSFunction.cpp:
+            (JSC::JSFunction::allocateAndInitializeRareData):
+            (JSC::JSFunction::initializeRareData):
+            * runtime/JSGlobalObject.cpp:
+            (JSC::JSGlobalObject::init):
+            * runtime/JSProxy.cpp:
+            (JSC::JSProxy::setTarget):
+            * runtime/ObjectConstructor.h:
+            (JSC::constructEmptyObject):
+            * runtime/PrototypeMap.cpp:
+            (JSC::PrototypeMap::createEmptyStructure):
+            (JSC::PrototypeMap::emptyStructureForPrototypeFromBaseStructure):
+            (JSC::PrototypeMap::emptyObjectStructureForPrototype):
+            (JSC::PrototypeMap::clearEmptyObjectStructureForPrototype):
+            * runtime/PrototypeMap.h:
+
+2017-02-09  Matthew Hanson  
+
+        Merge r212009. rdar://problem/29939864
+
+    2017-02-09  Keith Miller  
+
+            We should not allow Function.caller to be used on native functions
+            https://bugs.webkit.org/show_bug.cgi?id=165628
+
+            Reviewed by Mark Lam.
+
+            Also remove unneeded dynamic cast.
+
+            * runtime/JSFunction.cpp:
+            (JSC::RetrieveCallerFunctionFunctor::RetrieveCallerFunctionFunctor):
+            (JSC::JSFunction::callerGetter):
+
+2017-02-09  Matthew Hanson  
+
+        Merge r211622. rdar://problem/30116072
+
+    2017-02-02  Andreas Kling  
+
+            [Mac] In-process memory pressure monitor for WebContent processes AKA websam
+            
+            
+
+            Reviewed by Antti Koivisto.
+
+            Remove the sloppy "max live heap size" mechanism from JSC in favor of the new
+            WebCore-side memory footprint monitor.
+
+            * heap/Heap.cpp:
+            (JSC::Heap::updateAllocationLimits):
+            (JSC::Heap::didExceedMaxLiveSize): Deleted.
+            * heap/Heap.h:
+            (JSC::Heap::setMaxLiveSize): Deleted.
+
+2017-02-09  Matthew Hanson  
+
+        Merge r211896. rdar://problem/29754721
+
+    2017-02-08  Saam Barati  
+
+            Air IRC might spill a terminal that produces a value after the terminal
+            https://bugs.webkit.org/show_bug.cgi?id=167919
+            
+
+            Reviewed by Filip Pizlo.
+
+            IRC may spill a value-producing terminal (a patchpoint can be a value-producing terminal).
+            It used to do this by placing the spill *after* the terminal. This produces an invalid
+            graph because no instructions are allowed after the terminal.
+
+            I fixed this bug by having a cleanup pass over the IR after IRC is done.
+            The pass detects this problem, and fixes it by moving the spill into the
+            successors. However, it is careful to detect when the edge to the
+            successor is a critical edge. If the value-producing patchpoint is
+            the only predecessor of the successor, it just moves the spill
+            code to the beginning of the successor. Otherwise, the edge is
+            critical, and the pass breaks it by adding a block that does the
+            spilling and then jumps to the successor.
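+
+            A sketch of that placement rule over a toy CFG (the data structures are
+            stand-ins; Air's real types and insertion sets differ):
+
+            ```
+            #include <algorithm>
+            #include <deque>
+            #include <vector>
+
+            struct Block {
+                std::vector<Block*> predecessors;
+                std::vector<Block*> successors;
+                std::vector<int> insts; // stand-in for instructions
+            };
+
+            // Place `spill` logically "after" a value-producing terminal in `block`
+            // without appending past the terminal: push it into each successor,
+            // breaking critical edges with a fresh block.
+            void placeSpillAfterTerminal(Block& block, int spill, std::deque<Block>& cfg)
+            {
+                for (Block*& successor : block.successors) {
+                    if (successor->predecessors.size() == 1) {
+                        // Sole predecessor: spill at the top of the successor.
+                        successor->insts.insert(successor->insts.begin(), spill);
+                        continue;
+                    }
+                    // Critical edge: add a block that spills, then jumps to the successor.
+                    cfg.emplace_back();
+                    Block& split = cfg.back();
+                    split.insts.push_back(spill);
+                    split.predecessors.push_back(&block);
+                    split.successors.push_back(successor);
+                    std::replace(successor->predecessors.begin(), successor->predecessors.end(),
+                                 &block, &split);
+                    successor = &split; // redirect the edge through the split block
+                }
+            }
+            ```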
+
+            * b3/air/AirInsertionSet.cpp:
+            * b3/air/AirInsertionSet.h:
+            (JSC::B3::Air::InsertionSet::insertInsts):
+            * b3/air/AirIteratedRegisterCoalescing.cpp:
+            * b3/testb3.cpp:
+            (JSC::B3::testTerminalPatchpointThatNeedsToBeSpilled):
+            (JSC::B3::testTerminalPatchpointThatNeedsToBeSpilled2):
+            (JSC::B3::run):
+
+2017-02-09  Matthew Hanson  
+
+        Merge r211642. rdar://problem/29542720
+
+    2017-02-03  Saam Barati  
+
+            When OSR entering to the baseline JIT from the LLInt for a ProgramCodeBlock we can skip compiling a lot of the program
+            https://bugs.webkit.org/show_bug.cgi?id=167725
+            
+
+            Reviewed by Michael Saboff.
+
+            We often want to baseline compile ProgramCode once we hit a loop in the LLInt.
+            However, some programs execute a non-trivial amount of code before the loop.
+            This code can never be executed again because ProgramCodeBlocks never run more
+            than once. We're wasting time and memory by compiling code that is unreachable
+            from the OSR entry destination. This patch fixes this by only compiling code
+            that is reachable from the OSR entry destination.
+
+            This is a speedup on Kraken/ai-astar for devices with limited CPUs (I've been
+            testing on devices with 2 CPUs). On ai-astar, we were spending 50-100ms compiling
+            a huge ProgramCodeBlock in the baseline JIT where the majority of the code
+            would never execute. If this compilation was kicked off on the main thread,
+            then we'd be stalled for a long time. If it were started on the baseline JITs
+            background compilation thread, we'd still waste 50-100ms in that thread, causing
+            all other baseline compilations to happen on the main thread.
+
+            * interpreter/Interpreter.cpp:
+            (JSC::Interpreter::executeProgram):
+            * interpreter/Interpreter.h:
+            * jit/JIT.cpp:
+            (JSC::JIT::JIT):
+            (JSC::JIT::privateCompileMainPass):
+            * jit/JIT.h:
+            (JSC::JIT::compile):
+            * jit/JITWorklist.cpp:
+            (JSC::JITWorklist::Plan::Plan):
+            (JSC::JITWorklist::Plan::compileNow):
+            (JSC::JITWorklist::compileLater):
+            (JSC::JITWorklist::compileNow):
+            * jit/JITWorklist.h:
+            * llint/LLIntSlowPaths.cpp:
+            (JSC::LLInt::jitCompileAndSetHeuristics):
+            (JSC::LLInt::LLINT_SLOW_PATH_DECL):
+            * runtime/Completion.cpp:
+            (JSC::evaluate):
+
+2017-02-09  Matthew Hanson  
+
+        Merge r211603. rdar://problem/30318237
+
+    2017-02-02  Mark Lam  
+
+            Add a SIGILL crash analyzer to make debugging SIGILLs easier.
+            https://bugs.webkit.org/show_bug.cgi?id=167714
+            
+
+            Reviewed by Filip Pizlo.
+
+            The current implementation is only for X86_64 and ARM64 on OS(DARWIN).  The
+            analyzer is not enabled on other ports.
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * API/JSVirtualMachine.mm:
+            * assembler/ARM64Assembler.h:
+            (JSC::ARM64Assembler::illegalInstruction):
+            * assembler/MacroAssemblerARM64.h:
+            (JSC::MacroAssemblerARM64::illegalInstruction):
+            * assembler/MacroAssemblerX86Common.h:
+            (JSC::MacroAssemblerX86Common::illegalInstruction):
+            * assembler/X86Assembler.h:
+            (JSC::X86Assembler::illegalInstruction):
+            * heap/Heap.cpp:
+            (JSC::Heap::forEachCodeBlockIgnoringJITPlansImpl):
+            * heap/Heap.h:
+            * heap/HeapInlines.h:
+            (JSC::Heap::forEachCodeBlockIgnoringJITPlans):
+            * runtime/Options.cpp:
+            (JSC::Options::isAvailable):
+            (JSC::recomputeDependentOptions):
+            * runtime/Options.h:
+            * runtime/VM.cpp:
+            (JSC::VM::VM):
+            (JSC::VM::~VM):
+            * runtime/VM.h:
+            * tools/SigillCrashAnalyzer.cpp: Added.
+            (JSC::SignalContext::SignalContext):
+            (JSC::SignalContext::dump):
+            (JSC::handleCrash):
+            (JSC::initializeCrashHandler):
+            (JSC::ensureSigillCrashAnalyzer):
+            (JSC::SigillCrashAnalyzer::analyze):
+            (JSC::SigillCrashAnalyzer::dumpCodeBlock):
+            * tools/SigillCrashAnalyzer.h: Added.
+            * tools/VMInspector.cpp: Added.
+            (JSC::VMInspector::instance):
+            (JSC::VMInspector::add):
+            (JSC::VMInspector::remove):
+            (JSC::ensureIsSafeToLock):
+            * tools/VMInspector.h: Added.
+            (JSC::VMInspector::iterate):
+
+2017-02-06  Matthew Hanson  
+
+        Merge r211666. rdar://problem/30167791
+
+    2017-02-03  Joseph Pecoraro  
+
+            Unreviewed rollout of r211486, r211629.
+
+            Original change is not ideal and is causing issues.
+
+            * inspector/agents/InspectorHeapAgent.cpp:
+            (Inspector::SendGarbageCollectionEventsTask::SendGarbageCollectionEventsTask):
+            * runtime/InitializeThreading.cpp:
+            (JSC::initializeThreading):
+
+2017-02-05  Matthew Hanson  
+
+        Merge r211630. rdar://problem/30318237
+
+    2017-02-03  Csaba Osztrogonác  
+
+            [cmake] Unreviewed AArch64 buildfix after r211603.
+            https://bugs.webkit.org/show_bug.cgi?id=167714
+
+            * CMakeLists.txt:
+
+2017-02-05  Matthew Hanson  
+
+        Merge r211658. rdar://problem/29144126
+
+    2017-02-03  JF Bastien  
+
+            OSR entry: delay outer-loop compilation when at inner-loop
+            https://bugs.webkit.org/show_bug.cgi?id=167149
+
+            Reviewed by Filip Pizlo.
+
+            r211224 and r211461 were reverted because they caused massive
+            kraken/ai-astar regressions. This patch instead does the
+            minimally-disruptive change to fix the original bug as described
+            below, but omits extra tuning and refactoring which I had
+            before. I'll commit tuning and refactoring separately, if this
+            sticks. This patch is therefore very minimal, and layers carefully
+            on top of the complex spaghetti-logic. The only change it makes is
+            that it uses triggers to indicate to outer loops that they should
+            compile, which fixes the immediate bug and seems roughly perf
+            neutral (maybe a small gain on kraken sometimes, other times a
+            small regression as would be expected from slightly compiling
+            later). As opposed to r211461 this patch doesn't unconditionally
+            unset the trigger because it prevents further DFG executions from
+            entering. It therefore makes the trigger a tri-state enum class:
+            don't trigger, compilation done, start compilation. Only "start
+            compilation" gets reset to "don't trigger". "Compilation done"
+            does not (unless there's a problem compiling, then it gets set
+            back to "don't trigger").
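+
+            A sketch of that tri-state trigger (the enumerator names follow the
+            prose; the actual declaration and its uses in DFGJITCode.h may differ):
+
+            ```
+            #include <atomic>
+            #include <cstdint>
+
+            enum class TriggerReason : uint8_t {
+                DontTrigger,      // outer loop should not start a compile
+                CompilationDone,  // sticky: future executions may OSR-enter
+                StartCompilation, // one-shot: reset to DontTrigger once consumed
+            };
+
+            std::atomic<TriggerReason> outerLoopTrigger { TriggerReason::DontTrigger };
+
+            bool consumeStartCompilationTrigger()
+            {
+                // Only StartCompilation is reset; CompilationDone stays set unless the
+                // compile fails, in which case it would be set back to DontTrigger.
+                TriggerReason expected = TriggerReason::StartCompilation;
+                return outerLoopTrigger.compare_exchange_strong(expected, TriggerReason::DontTrigger);
+            }
+            ```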
+
+            As of https://bugs.webkit.org/show_bug.cgi?id=155217 OSR
+            compilation can be kicked off for an entry into an outer-loop,
+            while executing an inner-loop. This is desirable because often the
+            codegen from an inner-entry isn't as good as the codegen from an
+            outer-entry, but execution from an inner-loop is often pretty hot
+            and likely to kick off compilation. This approach provided nice
+            speedups on Kraken because we'd select to enter to the outer-loop
+            very reliably, which reduces variability (the inner-loop was
+            selected roughly 1/5 times from my unscientific measurements).
+
+            When compilation starts we take a snapshot of the JSValues at the
+            current execution state using OSR's recovery mechanism. These
+            values are passed to the compiler and are used as a way to perform
+            type profiling, and could be used to observe cell types as well as
+            to perform predictions such as through constant propagation.
+
+            It's therefore desired to enter from the outer-loop when we can,
+            but we need to be executing from that location to capture the
+            right JSValues, otherwise we're confusing the compiler and giving
+            it inaccurate JSValues which can lead it to predict the wrong
+            things, leading to suboptimal code or recompilation due to
+            misprediction, or in super-corner-cases a crash.
+
+            DFG tier-up was added here:
+            https://bugs.webkit.org/show_bug.cgi?id=112838
+
+            * dfg/DFGJITCode.h:
+            * dfg/DFGJITCompiler.cpp:
+            (JSC::DFG::JITCompiler::JITCompiler):
+            * dfg/DFGOperations.cpp:
+            * dfg/DFGSpeculativeJIT64.cpp:
+            (JSC::DFG::SpeculativeJIT::compile):
+            * dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp:
+            (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::ToFTLForOSREntryDeferredCompilationCallback):
+            (JSC::DFG::RefToFTLForOSREntryDeferredCompilationCallback::create):
+            (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
+            (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete):
+            * dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h:
+
+2017-02-02  Matthew Hanson  
+
+        Merge r211486. rdar://problem/30167791
+
+    2017-02-01  Joseph Pecoraro  
+
+            Web Inspector: Use guaranteed RunLoop instead of RunLoop::current for dispatching inspector GC event
+            https://bugs.webkit.org/show_bug.cgi?id=167683
+            
+
+            Reviewed by Timothy Hatcher.
+
+            * inspector/agents/InspectorHeapAgent.cpp:
+            (Inspector::SendGarbageCollectionEventsTask::SendGarbageCollectionEventsTask):
+            Use RunLoop::main instead of RunLoop::current which may go away.
+
+            * runtime/InitializeThreading.cpp:
+            (JSC::initializeThreading):
+            Ensure RunLoop::main is initialized when using JSC APIs.
+
+2017-02-02  Matthew Hanson  
+
+        Merge r211463. rdar://problem/30296879
+
+    2017-01-31  Filip Pizlo  
+
+            Make verifyEdge a RELEASE_ASSERT
+            
+
+            Rubber stamped by Saam Barati.
+
+            * dfg/DFGAbstractInterpreterInlines.h:
+            (JSC::DFG::AbstractInterpreter::executeEffects):
+
+2017-01-31  Matthew Hanson  
+
+        Merge r211385. rdar://problem/29738502
+
+    2017-01-30  Matt Baker  
+
+            Web Inspector: Need some limit on Async Call Stacks for async loops (rAF loops)
+            https://bugs.webkit.org/show_bug.cgi?id=165633
+            
+
+            Reviewed by Joseph Pecoraro.
+
+            This patch limits the memory used by the Inspector backend to store async
+            stack trace data.
+
+            Asynchronous stack traces are stored as a disjoint set of parent pointer
+            trees. Tree nodes represent asynchronous operations, and hold a copy of
+            the stack trace at the time the operation was scheduled. Each tree can
+            be regarded as a set of stack traces, stored as singly linked lists that
+            share part of their structure (specifically their tails). Traces belonging
+            to the same tree will at least share a common root. A stack trace begins
+            at a leaf node and follows the chain of parent pointers to the root of
+            the tree. Leaf nodes always contain pending asynchronous calls.
+
+            When an asynchronous operation is scheduled with requestAnimationFrame,
+            setInterval, etc, a node is created containing the current call stack and
+            some bookkeeping data for the operation. A unique identifier composed
+            of an operation type and callback identifier is mapped to the node. If
+            scheduling the callback was itself the result of an asynchronous call,
+            the node becomes a child of the node associated with that call, otherwise
+            it becomes the root of a new tree.
+
+            A node is either `pending`, `active`, `dispatched`, or `canceled`. Nodes
+            start out as pending. After a callback for a pending node is dispatched
+            the node is marked as such, unless it is a repeating callback such as
+            setInterval, in which case it remains pending. Once a node is no longer
+            pending it is removed, as long as it has no children. Since nodes are
+            reference counted, it is a property of the stack trace tree that nodes
+            that are no longer pending and have no children pointing to them will be
+            automatically pruned from the tree.
+
+            If an async operation is canceled (e.g. clearTimeout), the associated
+            node is marked as such. If the callback is not being dispatched at the
+            time, and has no children, it is removed.
+
+            Because async operations can be chained indefinitely, stack traces are
+            limited to a maximum depth. The depth of a stack trace is equal to the
+            sum of the depths of its nodes, with a node's depth equal to the number
+            of frames in its associated call stack. For any stack trace,
+
+                S = { s0, s1, …, sk }, with endpoints s0, sk
+                depth(S) = depth(s0) + depth(s1) + … + depth(sk)
+
+            A stack trace is truncated when it exceeds the maximum depth. Truncation
+            occurs on node boundaries, not call frames; consequently, the maximum depth
+            is more of a target than a guarantee:
+
+                d = maximum stack trace depth
+                for all S, depth(S) ≤ d + depth(sk)
+
+            Because nodes can belong to multiple stack traces, it may be necessary
+            to clone the tail of a stack trace being truncated to prevent other traces
+            from being affected.
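+
+            A self-contained sketch of the depth rule and node-boundary truncation
+            (stand-in types; the real AsyncStackTrace nodes also carry the captured
+            call frames and the pending/dispatched/canceled state):
+
+            ```
+            #include <cstddef>
+            #include <memory>
+            #include <vector>
+
+            struct NodeSketch {
+                std::vector<int> frames;            // captured call stack for this hop
+                std::shared_ptr<NodeSketch> parent; // next asynchronous hop toward the root
+            };
+
+            // depth(S) = depth(s0) + depth(s1) + ... + depth(sk)
+            std::size_t traceDepth(const NodeSketch* leaf)
+            {
+                std::size_t depth = 0;
+                for (const NodeSketch* node = leaf; node; node = node->parent.get())
+                    depth += node->frames.size();
+                return depth;
+            }
+
+            // Truncate on node boundaries: once the budget is exceeded, drop the rest
+            // of the chain (a shared tail would first be cloned, as noted above).
+            void truncate(NodeSketch* leaf, std::size_t maxDepth)
+            {
+                std::size_t depth = 0;
+                for (NodeSketch* node = leaf; node; node = node->parent.get()) {
+                    depth += node->frames.size();
+                    if (depth >= maxDepth) {
+                        node->parent.reset(); // cut the chain at a node boundary
+                        return;
+                    }
+                }
+            }
+            ```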
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * inspector/AsyncStackTrace.cpp: Added.
+            (Inspector::AsyncStackTrace::create):
+            (Inspector::AsyncStackTrace::AsyncStackTrace):
+            (Inspector::AsyncStackTrace::~AsyncStackTrace):
+            (Inspector::AsyncStackTrace::isPending):
+            (Inspector::AsyncStackTrace::isLocked):
+            (Inspector::AsyncStackTrace::willDispatchAsyncCall):
+            (Inspector::AsyncStackTrace::didDispatchAsyncCall):
+            (Inspector::AsyncStackTrace::didCancelAsyncCall):
+            (Inspector::AsyncStackTrace::buildInspectorObject):
+            (Inspector::AsyncStackTrace::truncate):
+            (Inspector::AsyncStackTrace::remove):
+            * inspector/AsyncStackTrace.h:
+            * inspector/agents/InspectorDebuggerAgent.cpp:
+            (Inspector::InspectorDebuggerAgent::didScheduleAsyncCall):
+            (Inspector::InspectorDebuggerAgent::didCancelAsyncCall):
+            (Inspector::InspectorDebuggerAgent::willDispatchAsyncCall):
+            (Inspector::InspectorDebuggerAgent::didDispatchAsyncCall):
+            (Inspector::InspectorDebuggerAgent::didPause):
+            (Inspector::InspectorDebuggerAgent::clearAsyncStackTraceData):
+            (Inspector::InspectorDebuggerAgent::buildAsyncStackTrace): Deleted.
+            (Inspector::InspectorDebuggerAgent::refAsyncCallData): Deleted.
+            (Inspector::InspectorDebuggerAgent::derefAsyncCallData): Deleted.
+            * inspector/agents/InspectorDebuggerAgent.h:
+            * inspector/protocol/Console.json:
+
+2017-01-31  Matthew Hanson  
+
+        Merge r211300. rdar://problem/30135571
+
+    2017-01-27  Filip Pizlo  
+
+            scanExternalRememberedSet needs to mergeIfNecessary
+            https://bugs.webkit.org/show_bug.cgi?id=167523
+
+            Reviewed by Keith Miller.
+
+            The protocol for opaque roots is that if you add to them outside of draining, then you need to call
+            mergeIfNecessary.
+
+            This means that every MarkingConstraint that adds opaque roots needs to mergeIfNecessary after.
+
+            scanExternalRememberedSet transitively calls addOpaqueRoot, is called from a MarkingConstraint, and
+            was missing a call to mergeIfNecessary. This fixes it.
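+
+            A self-contained sketch of why the merge matters (stand-in types; the real
+            visitor keeps opaque roots per thread and merges them into the Heap's set):
+
+            ```
+            #include <mutex>
+            #include <unordered_set>
+            #include <vector>
+
+            struct OpaqueRootsSketch {
+                std::unordered_set<void*> shared; // what other threads can observe
+                std::mutex lock;
+            };
+
+            struct VisitorSketch {
+                OpaqueRootsSketch& roots;
+                std::vector<void*> pending; // visitor-local, invisible until merged
+
+                void addOpaqueRoot(void* root) { pending.push_back(root); }
+
+                // A constraint that adds roots outside of draining must call this,
+                // or the roots it added are never published.
+                void mergeIfNecessary()
+                {
+                    if (pending.empty())
+                        return;
+                    std::lock_guard<std::mutex> holdLock(roots.lock);
+                    roots.shared.insert(pending.begin(), pending.end());
+                    pending.clear();
+                }
+            };
+            ```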
+
+            * API/JSVirtualMachine.mm:
+            (scanExternalRememberedSet):
+
+2017-01-27  Matthew Hanson  
+
+        Merge r211194. rdar://problem/30201008
+
+    2017-01-25  Filip Pizlo  
+
+            jsc.cpp should have the $.agent stuff for testing SAB
+            https://bugs.webkit.org/show_bug.cgi?id=167431
+
+            Reviewed by Saam Barati.
+
+            This adds some stuff that the SAB branch of test262 needs. None of this is exposed except for our
+            own tests and the SAB branch of test262. We now pass all of the Atomics tests in the SAB branch
+            of test262.
+
+            * jsc.cpp:
+            (Message::releaseContents):
+            (Message::index):
+            (GlobalObject::finishCreation):
+            (GlobalObject::addFunction):
+            (Message::Message):
+            (Message::~Message):
+            (Worker::Worker):
+            (Worker::~Worker):
+            (Worker::send):
+            (Worker::receive):
+            (Worker::current):
+            (Worker::currentWorker):
+            (Workers::Workers):
+            (Workers::~Workers):
+            (Workers::broadcast):
+            (Workers::report):
+            (Workers::tryGetReport):
+            (Workers::getReport):
+            (Workers::singleton):
+            (functionDollarCreateRealm):
+            (functionDollarDetachArrayBuffer):
+            (functionDollarEvalScript):
+            (functionDollarAgentStart):
+            (functionDollarAgentReceiveBroadcast):
+            (functionDollarAgentReport):
+            (functionDollarAgentSleep):
+            (functionDollarAgentBroadcast):
+            (functionDollarAgentGetReport):
+            (functionWaitForReport):
+            (checkException):
+            (runWithScripts):
+            (runJSC):
+            (jscmain):
+            * runtime/JSArrayBuffer.h:
+
+2017-01-27  Matthew Hanson  
+
+        Rollout r211258. rdar://problem/29144126
+
+2017-01-27  Matthew Hanson  
+
+        Merge r211237. rdar://problem/30179506
+
+    2017-01-26  Saam Barati  
+
+            Harden how the compiler references GC objects
+            https://bugs.webkit.org/show_bug.cgi?id=167277
+            
+
+            Reviewed by Filip Pizlo.
+
+            Since r210971, the DFG/FTL will flash safepoints before
+            each phase. This means that there are more opportunities for
+            a GC to happen while the compiler is running. Because of this,
+            the compiler must keep track of all the heap pointers that are part
+            of the Graph data structure. To accomplish this, I've designed
+            a new type called RegisteredStructure that can only be constructed
+            after the Graph becomes aware of its underlying Structure*. I
+            designed this new type to have the type system in C++ help us catch
+            errors where we're not informing the graph/plan of a heap pointer.
+            I've made it a compile error to create an OpInfo with a pointer
+            T* where T inherits from HeapCell. This encourages an OpInfo
+            to be created with either a FrozenValue* or a RegisteredStructure.
+            I've added similar compile time assertions for TrustedImmPtr in DFG::SpeculativeJIT
+            and FTL::Output::constIntPtr. These static asserts don't save us from all bad
+            programs, because there are ways to write incorrect code that still compiles,
+            but the new types do help us ensure that the most obvious way of writing the
+            code is correct.
+
+            The reason this patch is so big is that I've strung RegisteredStructure and
+            RegisteredStructureSet through the entire DFG/FTL.
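+
+            A sketch of the wrapper idiom (constructor access is the point; the real
+            RegisteredStructure in DFGRegisteredStructure.h has more to it):
+
+            ```
+            struct Structure;
+            class Graph;
+
+            class RegisteredStructureSketch {
+            public:
+                Structure* get() const { return m_structure; }
+                Structure* operator->() const { return m_structure; }
+                explicit operator bool() const { return !!m_structure; }
+
+            private:
+                // Only Graph::registerStructure() can mint one, so holding this type
+                // is evidence that the plan already tracks the underlying heap pointer
+                // and the GC will keep it alive across safepoints.
+                friend class Graph;
+                explicit RegisteredStructureSketch(Structure* structure)
+                    : m_structure(structure) { }
+
+                Structure* m_structure { nullptr };
+            };
+            ```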
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * bytecode/CodeBlock.cpp:
+            (JSC::CodeBlock::determineLiveness):
+            * bytecode/StructureSet.cpp:
+            (JSC::StructureSet::filter): Deleted.
+            (JSC::StructureSet::filterArrayModes): Deleted.
+            (JSC::StructureSet::speculationFromStructures): Deleted.
+            (JSC::StructureSet::arrayModesFromStructures): Deleted.
+            (JSC::StructureSet::validateReferences): Deleted.
+            * bytecode/StructureSet.h:
+            * dfg/DFGAbstractInterpreter.h:
+            (JSC::DFG::AbstractInterpreter::filter):
+            * dfg/DFGAbstractInterpreterInlines.h:
+            (JSC::DFG::AbstractInterpreter::booleanResult):
+            (JSC::DFG::isToThisAnIdentity):
+            (JSC::DFG::AbstractInterpreter::executeEffects):
+            (JSC::DFG::AbstractInterpreter::observeTransition):
+            (JSC::DFG::AbstractInterpreter::filter):
+            * dfg/DFGAbstractValue.cpp:
+            (JSC::DFG::AbstractValue::set):
+            (JSC::DFG::AbstractValue::setType):
+            (JSC::DFG::AbstractValue::mergeOSREntryValue):
+            (JSC::DFG::AbstractValue::filter):
+            (JSC::DFG::AbstractValue::changeStructure):
+            (JSC::DFG::AbstractValue::contains):
+            * dfg/DFGAbstractValue.h:
+            (JSC::DFG::AbstractValue::observeTransition):
+            (JSC::DFG::AbstractValue::TransitionObserver::TransitionObserver):
+            * dfg/DFGArgumentsEliminationPhase.cpp:
+            * dfg/DFGArrayMode.cpp:
+            (JSC::DFG::ArrayMode::alreadyChecked):
+            * dfg/DFGArrayifySlowPathGenerator.h:
+            (JSC::DFG::ArrayifySlowPathGenerator::ArrayifySlowPathGenerator):
+            * dfg/DFGByteCodeParser.cpp:
+            (JSC::DFG::ByteCodeParser::handleConstantInternalFunction):
+            (JSC::DFG::ByteCodeParser::load):
+            (JSC::DFG::ByteCodeParser::handleGetById):
+            (JSC::DFG::ByteCodeParser::handlePutById):
+            (JSC::DFG::ByteCodeParser::parseBlock):
+            (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
+            * dfg/DFGCallArrayAllocatorSlowPathGenerator.h:
+            (JSC::DFG::CallArrayAllocatorSlowPathGenerator::CallArrayAllocatorSlowPathGenerator):
+            (JSC::DFG::CallArrayAllocatorWithVariableSizeSlowPathGenerator::CallArrayAllocatorWithVariableSizeSlowPathGenerator):
+            * dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h:
+            (JSC::DFG::CallCreateDirectArgumentsSlowPathGenerator::CallCreateDirectArgumentsSlowPathGenerator):
+            * dfg/DFGCommonData.cpp:
+            (JSC::DFG::CommonData::notifyCompilingStructureTransition):
+            * dfg/DFGConstantFoldingPhase.cpp:
+            (JSC::DFG::ConstantFoldingPhase::foldConstants):
+            (JSC::DFG::ConstantFoldingPhase::emitGetByOffset):
+            (JSC::DFG::ConstantFoldingPhase::emitPutByOffset):
+            (JSC::DFG::ConstantFoldingPhase::addBaseCheck):
+            (JSC::DFG::ConstantFoldingPhase::addStructureTransitionCheck):
+            * dfg/DFGDesiredWeakReferences.cpp:
+            (JSC::DFG::DesiredWeakReferences::reallyAdd):
+            * dfg/DFGFixupPhase.cpp:
+            (JSC::DFG::FixupPhase::checkArray):
+            * dfg/DFGGraph.cpp:
+            (JSC::DFG::Graph::Graph):
+            (JSC::DFG::Graph::dump):
+            (JSC::DFG::Graph::tryGetConstantProperty):
+            (JSC::DFG::Graph::inferredValueForProperty):
+            (JSC::DFG::Graph::visitChildren):
+            (JSC::DFG::Graph::freeze):
+            (JSC::DFG::Graph::registerStructure):
+            (JSC::DFG::Graph::assertIsRegistered):
+            * dfg/DFGGraph.h:
+            (JSC::DFG::Graph::registerStructure):
+            (JSC::DFG::Graph::addStructureSet):
+            * dfg/DFGJITCompiler.h:
+            (JSC::DFG::JITCompiler::branchWeakStructure):
+            * dfg/DFGMultiGetByOffsetData.cpp:
+            (JSC::DFG::MultiGetByOffsetCase::dumpInContext):
+            * dfg/DFGMultiGetByOffsetData.h:
+            (JSC::DFG::MultiGetByOffsetCase::MultiGetByOffsetCase):
+            (JSC::DFG::MultiGetByOffsetCase::set):
+            * dfg/DFGNode.cpp:
+            (JSC::DFG::Node::convertToPutStructureHint):
+            * dfg/DFGNode.h:
+            (JSC::DFG::Node::convertToCheckStructure):
+            (JSC::DFG::Node::structureSet):
+            (JSC::DFG::Node::structure):
+            (JSC::DFG::Node::OpInfoWrapper::OpInfoWrapper):
+            (JSC::DFG::Node::OpInfoWrapper::operator=):
+            (JSC::DFG::Node::OpInfoWrapper::asRegisteredStructure):
+            * dfg/DFGObjectAllocationSinkingPhase.cpp:
+            * dfg/DFGOpInfo.h:
+            (JSC::DFG::OpInfo::OpInfo):
+            * dfg/DFGPlan.cpp:
+            (JSC::DFG::Plan::compileInThreadImpl):
+            (JSC::DFG::Plan::finalizeWithoutNotifyingCallback):
+            * dfg/DFGRegisteredStructure.h: Added.
+            (JSC::DFG::RegisteredStructure::get):
+            (JSC::DFG::RegisteredStructure::operator->):
+            (JSC::DFG::RegisteredStructure::operator==):
+            (JSC::DFG::RegisteredStructure::operator!=):
+            (JSC::DFG::RegisteredStructure::operator bool):
+            (JSC::DFG::RegisteredStructure::RegisteredStructure):
+            (JSC::DFG::RegisteredStructure::createPrivate):
+            * dfg/DFGRegisteredStructureSet.cpp: Added.
+            (JSC::DFG::RegisteredStructureSet::filter):
+            (JSC::DFG::RegisteredStructureSet::filterArrayModes):
+            (JSC::DFG::RegisteredStructureSet::speculationFromStructures):
+            (JSC::DFG::RegisteredStructureSet::arrayModesFromStructures):
+            (JSC::DFG::RegisteredStructureSet::validateReferences):
+            * dfg/DFGRegisteredStructureSet.h: Added.
+            (JSC::DFG::RegisteredStructureSet::RegisteredStructureSet):
+            (JSC::DFG::RegisteredStructureSet::onlyStructure):
+            (JSC::DFG::RegisteredStructureSet::toStructureSet):
+            * dfg/DFGSafeToExecute.h:
+            (JSC::DFG::safeToExecute):
+            * dfg/DFGSpeculativeJIT.cpp:
+            (JSC::DFG::SpeculativeJIT::emitAllocateRawObject):
+            (JSC::DFG::SpeculativeJIT::emitGetCallee):
+            (JSC::DFG::SpeculativeJIT::silentFill):
+            (JSC::DFG::SpeculativeJIT::checkArray):
+            (JSC::DFG::SpeculativeJIT::compileGetByValOnString):
+            (JSC::DFG::SpeculativeJIT::compileFromCharCode):
+            (JSC::DFG::SpeculativeJIT::compileDoubleRep):
+            (JSC::DFG::compileClampDoubleToByte):
+            (JSC::DFG::SpeculativeJIT::compileMakeRope):
+            (JSC::DFG::SpeculativeJIT::compileArithRounding):
+            (JSC::DFG::SpeculativeJIT::compileNewFunctionCommon):
+            (JSC::DFG::SpeculativeJIT::compileNewFunction):
+            (JSC::DFG::SpeculativeJIT::compileCreateActivation):
+            (JSC::DFG::SpeculativeJIT::compileCreateDirectArguments):
+            (JSC::DFG::SpeculativeJIT::compileCreateScopedArguments):
+            (JSC::DFG::SpeculativeJIT::compileCreateClonedArguments):
+            (JSC::DFG::SpeculativeJIT::compileSpread):
+            (JSC::DFG::SpeculativeJIT::compileArraySlice):
+            (JSC::DFG::SpeculativeJIT::compileTypeOf):
+            (JSC::DFG::SpeculativeJIT::compileAllocatePropertyStorage):
+            (JSC::DFG::SpeculativeJIT::compileReallocatePropertyStorage):
+            (JSC::DFG::SpeculativeJIT::compileToStringOrCallStringConstructorOnCell):
+            (JSC::DFG::SpeculativeJIT::compileNewTypedArray):
+            (JSC::DFG::SpeculativeJIT::speculateStringOrStringObject):
+            (JSC::DFG::SpeculativeJIT::compileMaterializeNewObject):
+            * dfg/DFGSpeculativeJIT.h:
+            (JSC::DFG::SpeculativeJIT::TrustedImmPtr::TrustedImmPtr):
+            (JSC::DFG::SpeculativeJIT::TrustedImmPtr::weakPointer):
+            (JSC::DFG::SpeculativeJIT::TrustedImmPtr::operator MacroAssembler::TrustedImmPtr):
+            (JSC::DFG::SpeculativeJIT::TrustedImmPtr::asIntptr):
+            (JSC::DFG::SpeculativeJIT::callOperation):
+            (JSC::DFG::SpeculativeJIT::emitAllocateDestructibleObject):
+            (JSC::DFG::SpeculativeJIT::speculateStringObjectForStructure):
+            * dfg/DFGSpeculativeJIT32_64.cpp:
+            (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined):
+            (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined):
+            (JSC::DFG::SpeculativeJIT::emitCall):
+            (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
+            (JSC::DFG::SpeculativeJIT::compileObjectOrOtherLogicalNot):
+            (JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
+            (JSC::DFG::SpeculativeJIT::compile):
+            (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
+            * dfg/DFGSpeculativeJIT64.cpp:
+            (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined):
+            (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined):
+            (JSC::DFG::SpeculativeJIT::emitCall):
+            (JSC::DFG::SpeculativeJIT::compileObjectOrOtherLogicalNot):
+            (JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
+            (JSC::DFG::SpeculativeJIT::compile):
+            (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
+            * dfg/DFGStrengthReductionPhase.cpp:
+            (JSC::DFG::StrengthReductionPhase::handleNode):
+            * dfg/DFGStructureAbstractValue.cpp:
+            (JSC::DFG::StructureAbstractValue::assertIsRegistered):
+            (JSC::DFG::StructureAbstractValue::clobber):
+            (JSC::DFG::StructureAbstractValue::observeTransition):
+            (JSC::DFG::StructureAbstractValue::observeTransitions):
+            (JSC::DFG::StructureAbstractValue::add):
+            (JSC::DFG::StructureAbstractValue::merge):
+            (JSC::DFG::StructureAbstractValue::mergeNotTop):
+            (JSC::DFG::StructureAbstractValue::filter):
+            (JSC::DFG::StructureAbstractValue::filterSlow):
+            (JSC::DFG::StructureAbstractValue::filterClassInfoSlow):
+            (JSC::DFG::StructureAbstractValue::contains):
+            (JSC::DFG::StructureAbstractValue::isSubsetOf):
+            (JSC::DFG::StructureAbstractValue::isSupersetOf):
+            (JSC::DFG::StructureAbstractValue::overlaps):
+            (JSC::DFG::StructureAbstractValue::isSubClassOf):
+            (JSC::DFG::StructureAbstractValue::dumpInContext):
+            * dfg/DFGStructureAbstractValue.h:
+            (JSC::DFG::StructureAbstractValue::StructureAbstractValue):
+            (JSC::DFG::StructureAbstractValue::operator=):
+            (JSC::DFG::StructureAbstractValue::set):
+            (JSC::DFG::StructureAbstractValue::toStructureSet):
+            (JSC::DFG::StructureAbstractValue::at):
+            (JSC::DFG::StructureAbstractValue::operator[]):
+            (JSC::DFG::StructureAbstractValue::onlyStructure):
+            * dfg/DFGStructureRegistrationPhase.cpp:
+            (JSC::DFG::StructureRegistrationPhase::StructureRegistrationPhase): Deleted.
+            (JSC::DFG::StructureRegistrationPhase::run): Deleted.
+            (JSC::DFG::StructureRegistrationPhase::registerStructures): Deleted.
+            (JSC::DFG::StructureRegistrationPhase::registerStructure): Deleted.
+            (JSC::DFG::StructureRegistrationPhase::assertAreRegistered): Deleted.
+            (JSC::DFG::StructureRegistrationPhase::assertIsRegistered): Deleted.
+            (JSC::DFG::performStructureRegistration): Deleted.
+            * dfg/DFGStructureRegistrationPhase.h:
+            * dfg/DFGTransition.cpp:
+            (JSC::DFG::Transition::dumpInContext):
+            * dfg/DFGTransition.h:
+            (JSC::DFG::Transition::Transition):
+            * dfg/DFGTypeCheckHoistingPhase.cpp:
+            (JSC::DFG::TypeCheckHoistingPhase::noticeStructureCheck):
+            (JSC::DFG::TypeCheckHoistingPhase::noticeStructureCheckAccountingForArrayMode):
+            * dfg/DFGValidate.cpp:
+            * ftl/FTLLowerDFGToB3.cpp:
+            (JSC::FTL::DFG::LowerDFGToB3::lower):
+            (JSC::FTL::DFG::LowerDFGToB3::compileCallObjectConstructor):
+            (JSC::FTL::DFG::LowerDFGToB3::compileCheckStructure):
+            (JSC::FTL::DFG::LowerDFGToB3::compilePutStructure):
+            (JSC::FTL::DFG::LowerDFGToB3::compileArraySlice):
+            (JSC::FTL::DFG::LowerDFGToB3::compileCreateActivation):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewFunction):
+            (JSC::FTL::DFG::LowerDFGToB3::compileCreateDirectArguments):
+            (JSC::FTL::DFG::LowerDFGToB3::compileCreateRest):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewArray):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewArrayWithSpread):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewArrayBuffer):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewArrayWithSize):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewTypedArray):
+            (JSC::FTL::DFG::LowerDFGToB3::compileAllocatePropertyStorage):
+            (JSC::FTL::DFG::LowerDFGToB3::compileReallocatePropertyStorage):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMultiGetByOffset):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMultiPutByOffset):
+            (JSC::FTL::DFG::LowerDFGToB3::compileGetMapBucket):
+            (JSC::FTL::DFG::LowerDFGToB3::compileOverridesHasInstance):
+            (JSC::FTL::DFG::LowerDFGToB3::compileCheckStructureImmediate):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeNewObject):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeCreateActivation):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewRegexp):
+            (JSC::FTL::DFG::LowerDFGToB3::compileLogShadowChickenTail):
+            (JSC::FTL::DFG::LowerDFGToB3::checkStructure):
+            (JSC::FTL::DFG::LowerDFGToB3::checkInferredType):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateObject):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateVariableSizedObject):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateJSArray):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateUninitializedContiguousJSArray):
+            (JSC::FTL::DFG::LowerDFGToB3::boolify):
+            (JSC::FTL::DFG::LowerDFGToB3::equalNullOrUndefined):
+            (JSC::FTL::DFG::LowerDFGToB3::lowCell):
+            (JSC::FTL::DFG::LowerDFGToB3::speculateStringObjectForStructureID):
+            (JSC::FTL::DFG::LowerDFGToB3::weakPointer):
+            (JSC::FTL::DFG::LowerDFGToB3::frozenPointer):
+            (JSC::FTL::DFG::LowerDFGToB3::weakStructureID):
+            (JSC::FTL::DFG::LowerDFGToB3::weakStructure):
+            (JSC::FTL::DFG::LowerDFGToB3::crash):
+            * ftl/FTLOutput.h:
+            (JSC::FTL::Output::weakPointer):
+            (JSC::FTL::Output::constIntPtr):
+
+2017-01-27  Matthew Hanson  
+
+        Merge r211246. rdar://problem/29916672
+
+    2017-01-26  Mark Lam  
+
+            Fix missing exception check in genericTypedArrayViewProtoFuncSet().
+            https://bugs.webkit.org/show_bug.cgi?id=166812
+            
+
+            Reviewed by Saam Barati.
+
+            * runtime/JSGenericTypedArrayViewPrototypeFunctions.h:
+            (JSC::genericTypedArrayViewProtoFuncSet):
+
+2017-01-26  Matthew Hanson  
+
+        Merge r211224. rdar://problem/29144126
+
+    2017-01-26  JF Bastien  
+
+            OSR entry: delay outer-loop compilation when at inner-loop
+            https://bugs.webkit.org/show_bug.cgi?id=167149
+
+            Reviewed by Filip Pizlo.
+
+            As of https://bugs.webkit.org/show_bug.cgi?id=155217 OSR
+            compilation can be kicked off for an entry into an outer-loop,
+            while executing an inner-loop. This is desirable because often the
+            codegen from an inner-entry isn't as good as the codegen from an
+            outer-entry, but execution from an inner-loop is often pretty hot
+            and likely to kick off compilation. This approach provided nice
+            speedups on Kraken because we'd select to enter to the outer-loop
+            very reliably, which reduces variability (the inner-loop was
+            selected roughly 1/5 times from my unscientific measurements).
+
+            When compilation starts we take a snapshot of the JSValues at the
+            current execution state using OSR's recovery mechanism. These
+            values are passed to the compiler and are used as a way to perform
+            type profiling, and could be used to observe cell types as well as
+            to perform predictions such as through constant propagation.
+
+            It's therefore desired to enter from the outer-loop when we can,
+            but we need to be executing from that location to capture the
+            right JSValues, otherwise we're confusing the compiler and giving
+            it inaccurate JSValues which can lead it to predict the wrong
+            things, leading to suboptimal code or recompilation due to
+            misprediction, or in super-corner-cases a crash.
+
+            These effects are pretty hard to measure: Fil points out that
+            marsalis-osr-entry really needs mustHandleValues (the JSValues
+            from the point of execution) because right now it just happens to
+            correctly guess int32. I tried removing mustHandleValues entirely
+            and saw no slowdowns, but our benchmarks probably aren't
+            sufficient to reliably find issues, sometimes because we happen to
+            have sufficient mitigations.
+
+            DFG tier-up was added here:
+            https://bugs.webkit.org/show_bug.cgi?id=112838
+
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * dfg/DFGJITCode.h:
+            * dfg/DFGJITCompiler.cpp:
+            (JSC::DFG::JITCompiler::JITCompiler):
+            * dfg/DFGOSREntry.cpp:
+            (JSC::DFG::prepareOSREntry):
+            * dfg/DFGOSREntry.h:
+            (JSC::DFG::prepareOSREntry):
+            * dfg/DFGOperations.cpp:
+            * dfg/DFGOperations.h:
+            * dfg/DFGSpeculativeJIT64.cpp:
+            (JSC::DFG::SpeculativeJIT::compile):
+            * dfg/DFGTierUpEntryTrigger.h: Copied from Source/JavaScriptCore/ftl/FTLOSREntry.h.
+            * dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp:
+            (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::ToFTLForOSREntryDeferredCompilationCallback):
+            (JSC::DFG::RefToFTLForOSREntryDeferredCompilationCallback::create):
+            (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously):
+            (JSC::DFG::ToFTLForOSREntryDeferredCompilationCallback::compilationDidComplete):
+            * dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h:
+            * ftl/FTLOSREntry.cpp:
+            (JSC::FTL::prepareOSREntry):
+            * ftl/FTLOSREntry.h:
+            * jit/JITOperations.cpp:
+
+2017-01-26  Matthew Hanson  
+
+        Merge r211167. rdar://problem/30192652
+
+    2017-01-25  Filip Pizlo  
+
+            ARM/ARM64 stress/atomics-store-return.js fails
+            
+
+            Reviewed by Michael Saboff.
+
+            The problem was relying on double->int casts for anything. We need to use toInt32().
+
+            * runtime/AtomicsObject.cpp:
+            (JSC::atomicsFuncCompareExchange):
+            (JSC::atomicsFuncExchange):
+            (JSC::atomicsFuncStore):
+
+2017-01-26  Matthew Hanson  
+
+        Merge r211180. rdar://problem/30156092
+
+    2017-01-25  Matthew Hanson  
+
+            Merge r211124. rdar://problem/30156092
+
+        2017-01-24  Michael Saboff  
+
+                InferredTypeTable entry manipulation is not TOCTOU race safe
+                https://bugs.webkit.org/show_bug.cgi?id=167344
+
+                Reviewed by Filip Pizlo.
+
+                Made the accesses to table values safe from Time of Check,
+                Time of Use races by using local temporary values.
+
+                Fixed the point where we set an entry in the table to access the
+                current table entry instead of using the local entry.  In that case,
+                we reload the now-changed entry.
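+
+                A generic sketch of the load-once pattern used for the other accesses
+                (stand-in types; the real code manipulates InferredType entries under
+                concurrent marking):
+
+                ```
+                #include <atomic>
+
+                struct Entry;
+                std::atomic<Entry*> tableSlot { nullptr };
+
+                void useEntrySafely()
+                {
+                    // Load once into a local and use only the local; re-reading the
+                    // slot between the check and the use races with concurrent updates.
+                    Entry* entry = tableSlot.load();
+                    if (!entry)
+                        return;
+                    // ... operate on `entry`, never on tableSlot.load(), from here on ...
+                }
+                ```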
+
+                * runtime/InferredTypeTable.cpp:
+                (JSC::InferredTypeTable::visitChildren):
+                (JSC::InferredTypeTable::get):
+                (JSC::InferredTypeTable::willStoreValue):
+                (JSC::InferredTypeTable::makeTop):
+
+2017-01-25  Matthew Hanson  
+
+        Rollout r211180.
+
+2017-01-25  Matthew Hanson  
+
+        Merge r211129. rdar://problem/30178458
+
+    2017-01-24  Filip Pizlo  
+
+            Atomics.store should return the int-converted value according to toInteger
+            https://bugs.webkit.org/show_bug.cgi?id=167399
+
+            Reviewed by Saam Barati.
+
+            I keep getting this wrong, but I think I've finally done it right. What we want is for
+            Atomics.store to return the value it was passed after toInteger, which doesn't clip the value to
+            any kind of range. It does get truncated to double.
+
+            This changes the code to pass those "integers" as doubles. It doesn't matter that this is slow,
+            since all of these code paths are slow due to their need to check everything. We'll take care of
+            that by making them intrinsic later.
+
+            * runtime/AtomicsObject.cpp:
+            (JSC::atomicsFuncAdd):
+            (JSC::atomicsFuncAnd):
+            (JSC::atomicsFuncCompareExchange):
+            (JSC::atomicsFuncExchange):
+            (JSC::atomicsFuncLoad):
+            (JSC::atomicsFuncOr):
+            (JSC::atomicsFuncStore):
+            (JSC::atomicsFuncSub):
+            (JSC::atomicsFuncXor):
+
+2017-01-25  Matthew Hanson  
+
+        Merge r211122. rdar://problem/30177808
+
+    2017-01-24  Filip Pizlo  
+
+            Atomics.store should return the int-converted value, not the value that it stored
+            https://bugs.webkit.org/show_bug.cgi?id=167395
+
+            Reviewed by Saam Barati.
+
+            Previously the code was based around passing a lambda that operated over the native type of the
+            operation (so for example int8_t if we were doing things to Int8Arrays). But to support this
+            behavior of store, we need it to be able to control how it converts its result to JSValue and it
+            needs to see its argument as an int32_t. It turns out that it's easy for all of the functions in
+            AtomicsObject.cpp to also adopt this protocol since the conversion to JSValue is just jsNumber()
+            from the native type in those cases, and the conversion from int32_t is done for free in
+            std::atomic.
+
+            * runtime/AtomicsObject.cpp:
+            (JSC::atomicsFuncAdd):
+            (JSC::atomicsFuncAnd):
+            (JSC::atomicsFuncCompareExchange):
+            (JSC::atomicsFuncExchange):
+            (JSC::atomicsFuncLoad):
+            (JSC::atomicsFuncOr):
+            (JSC::atomicsFuncStore):
+            (JSC::atomicsFuncSub):
+            (JSC::atomicsFuncXor):
+
+2017-01-25  Matthew Hanson  
+
+        Merge r211113. rdar://problem/30174692
+
+    2017-01-24  Filip Pizlo  
+
+            -0 is a valid array index and AtomicsObject should know this
+            https://bugs.webkit.org/show_bug.cgi?id=167386
+
+            Reviewed by Mark Lam.
+
+            * runtime/AtomicsObject.cpp: The bug title really says it all.
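+
+            For illustration, a standalone sketch of the point (hypothetical helper, not the
+            AtomicsObject code): since -0 compares equal to 0, a numeric range check accepts it,
+            and only code that inspects the sign bit or raw double bits would wrongly reject it:
+
+                #include <cassert>
+                #include <cmath>
+                #include <cstdint>
+
+                // Hypothetical index validator: -0 must behave exactly like index 0.
+                static bool validateIndexSketch(double index, uint32_t length, uint32_t& out)
+                {
+                    if (std::trunc(index) != index)
+                        return false; // not an integer
+                    if (index < 0 || index >= length)
+                        return false; // -0 passes because -0.0 == 0.0
+                    out = static_cast<uint32_t>(index); // -0 converts to 0
+                    return true;
+                }
+
+                int main()
+                {
+                    uint32_t out = 42;
+                    assert(validateIndexSketch(-0.0, 8, out) && !out);
+                    assert(!validateIndexSketch(-1.0, 8, out));
+                    return 0;
+                }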
+
+2017-01-25  Matthew Hanson  
+
+        Merge r211111. rdar://problem/30173375
+
+    2017-01-24  Filip Pizlo  
+
+            Enable the stochastic space-time scheduler on the larger multicores
+            https://bugs.webkit.org/show_bug.cgi?id=167382
+            
+
+            Rubber stamped by Saam Barati.
+
+            This looks like a 1.3% JetStream speed-up thanks to a 28% splay-latency improvement. This new
+            scheduler seems to prevent all of the same pathologies as the old one prevented. But instead of
+            periodically suspending the mutator, this new one will only suspend after an iteration of the
+            constraint fixpoint. The length of that suspension is random, with the distribution being
+            governed by mutatorUtilization. Once resumed, the mutator gets to run unimpeded until draining
+            stalls.
+
+            I'm enabling it on platforms as I benchmark those platforms. It's possible that we will want to
+            use a different scheduler on different platforms.
+
+            * runtime/Options.cpp:
+            (JSC::overrideDefaults):
+
+2017-01-25  Matthew Hanson  
+
+        Merge r211124. rdar://problem/30156092
+
+    2017-01-24  Michael Saboff  
+
+            InferredTypeTable entry manipulation is not TOCTOU race safe
+            https://bugs.webkit.org/show_bug.cgi?id=167344
+
+            Reviewed by Filip Pizlo.
+
+            Made the accesses to table values safe from Time of Check,
+            Time of Use races with local temporary values.
+
+            Fixed the point where we set an entry in the table so that it accesses the
+            current table entry instead of using the local copy; in that case,
+            we reload the now-changed entry.
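+
+            A standalone sketch of the pattern (std::atomic stands in for the real table entry;
+            this is not the InferredTypeTable code): read the shared slot into a local once, do
+            every check against that local, and when a competing store is detected, reload the
+            now-changed entry instead of trusting the stale local:
+
+                #include <atomic>
+
+                struct EntrySketch { int state { 0 }; };
+
+                struct TableSketch {
+                    std::atomic<EntrySketch*> slot { nullptr };
+
+                    // Returns the entry that actually ended up in the table.
+                    EntrySketch* willStoreValueSketch(EntrySketch* fresh)
+                    {
+                        EntrySketch* local = slot.load(); // single read; checks use only 'local'
+                        if (!local) {
+                            EntrySketch* expected = nullptr;
+                            if (slot.compare_exchange_strong(expected, fresh))
+                                return fresh; // we installed it
+                            return expected; // lost the race: use the now-changed entry
+                        }
+                        return local;
+                    }
+                };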
+
+            * runtime/InferredTypeTable.cpp:
+            (JSC::InferredTypeTable::visitChildren):
+            (JSC::InferredTypeTable::get):
+            (JSC::InferredTypeTable::willStoreValue):
+            (JSC::InferredTypeTable::makeTop):
+
+2017-01-25  Dean Jackson  
+
+        Disable Variation fonts on this branch.
+        
+
+        * Configurations/FeatureDefines.xcconfig:
+
+2017-01-24  Matthew Hanson  
+
+        Merge r211070. rdar://problem/30121809
+
+    2017-01-23  Saam Barati  
+
+            https://bugs.webkit.org/show_bug.cgi?id=167247
+            JSC: operationSpreadGeneric uses the wrong global object for the builtin function and slow_path_spread consults the wrong global object to prove if the iterator protocol is unobservable
+            
+
+            Reviewed by Filip Pizlo.
+
+            There were two bugs in the different tiers with respect to how
+            spread handled global objects.
+
+            The first was in the LLInt/baseline inside slow_path_spread:
+
+            We consulted the lexical global object instead of the global object of the
+            thing we're spreading to determine if the array iterator protocol
+            is unobservable. This is wrong if the incoming array is from a different
+            global object. We must consult the incoming array's global object
+            to determine if it can be spread using the fast path.
+
+            The second was in operationSpreadGeneric in the DFG/FTL:
+
+            We were always using the incoming array's global object, even
+            when going down the slow path. This is wrong because we were
+            fetching the builtin iteration function helper from the incoming
+            array's global object, which meant that if the iterator function
+            were to throw an exception, it could leak objects from a different
+            global object. We should be executing the iterator function with
+            the lexical global object.
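+
+            A minimal standalone model of the two rules (illustrative structs, not the JSC
+            objects): the fast-path check consults the realm that owns the array being spread,
+            while the slow path takes its builtin iteration helper from the caller's lexical realm:
+
+                // Each "realm" stands in for a global object.
+                struct RealmSketch {
+                    bool iteratorProtocolUnobservable { true };
+                    int builtinIterationHelper { 0 }; // identifies which realm's helper runs
+                };
+
+                struct SpreadTargetSketch { RealmSketch* ownerRealm { nullptr }; };
+
+                // Returns the helper to run, or -1 when the memcpy-style fast path is safe.
+                static int chooseSpreadPathSketch(RealmSketch& lexicalRealm, SpreadTargetSketch& target)
+                {
+                    if (target.ownerRealm->iteratorProtocolUnobservable) // ask the array's own realm
+                        return -1; // fast path: no user code runs
+                    return lexicalRealm.builtinIterationHelper; // slow path: caller's helper
+                }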
+
+            * dfg/DFGOperations.cpp:
+            * jsc.cpp:
+            (GlobalObject::finishCreation):
+            (functionGlobalObjectForObject):
+            * runtime/CommonSlowPaths.cpp:
+            (JSC::SLOW_PATH_DECL):
+            * runtime/JSArray.h:
+            * runtime/JSArrayInlines.h:
+            (JSC::JSArray::isIteratorProtocolFastAndNonObservable):
+
+2017-01-24  Matthew Hanson  
+
+        Merge r211069. rdar://problem/30173274
+
+    2017-01-22  Filip Pizlo  
+
+            Land the stochastic space-time scheduler disabled
+            https://bugs.webkit.org/show_bug.cgi?id=167249
+
+            Reviewed by Saam Barati.
+
+            The space-time scheduler is pretty weird. It uses a periodic scheduler where the next period is
+            simply determined by an integer multiple of time since when the scheduler last snapped phase. It
+            snaps phase after constraint solving. Both the snapping of the phase after constraint solving and
+            the periodicity appear to be necessary for good performance. For example, if the space-time
+            scheduler decided that it was in the resume part of the phase just by virtue of having just
+            resumed, then it would be empirically worse than our scheduler which asks "what time is it?" to
+            decide whether it should be suspended or resumed even if it just suspended or resumed. I've spent
+            a lot of time wondering why these two features are essential, and I think I found a reason.
+
+            What's happening is that sometimes the GC has an overrun and its increment takes longer than it
+            should have. The current scheduler forgives overruns when constraint solving, which seems to
+            make sense because it cannot control whether constraint solving runs with the mutator resumed or
+            suspended. It has to be suspended currently. Snapping phase after constraint solving accomplishes
+            this. What's more surprising is how important it is to manage deadline misses during draining.
+            The relevant kind of deadline miss is when doing mutator-suspended draining to catch up to the
+            retreating wavefront. Deadline misses while doing this can happen systematically in some
+            workloads, like JetStream/hash-map and some test in Speedometer. It's because they have some
+            ginormous object and it takes like ~3ms+-1.5ms just to scan it. The space-time scheduler's use
+            of time to decide what to do saves the day here: after the deadline miss, the scheduler will
+            initially realize that it missed its deadline to resume the mutator. But as soon as it does this
+            it asks: "based on current time since phase snap, what should I do?". In the case of a deadline
+            miss, this question is essentially a weighted coin flip because of the high noise in the amount
+            of time that it takes to do things in the GC. If you overrun, you will probably overrun by
+            multiple milliseconds, which is enough that where you land in the space-time scheduler's timeline
+            is random. The likelihood that you land in the "resume mutator" part of the timeline has a
+            probability that is roughly the same as what the space-time scheduler calls mutator utilization.
+            This is a super weird property. I did not intend for it to have this property, but it appears to
+            be the most important property of this scheduler.
+
+            Based on this, it seems that the fact that the space-time scheduler could suspend the mutator
+            before draining runs out of work doesn't accomplish anything. As soon as you resume the
+            mutator, you have a retreating wavefront to worry about. But if the collector is happily scanning
+            things then it's almost certain that the collector will outpace the mutator. Also, anything that
+            the mutator asks us to revisit is deferred anyway.
+
+            In the past I've tried to replace the scheduler in one patch and this turned out to be annoying
+            because even a poorly conceived scheduler should be iterated on. This patch lands a new scheduler
+            called the StochasticSpaceTime scheduler. It replaces two of the known-good features of the old
+            scheduler: (1) it forgives constraint pauses and (2) after deadline overrun its choice is random,
+            weighted by the mutator utilization target. Unlike the old scheduler, this one will only suspend
+            the mutator when the draining terminates, but it may pause for any amount of time after an
+            iteration of constraint solving. It computes the targetPause by measuring constraint solving time
+            and multiplying by the pauseScale (0.3 by default). If smaller than minimumPause (0.3ms by
+            default), then it uses minimumPause instead. The stochastic scheduler will then definitely do at
+            least targetPause worth of suspended draining after the constraint solving iteration, and then
+            it will decide whether or not to do another one at random. The probability that it will choose to
+            resume is exactly mutatorUtilization, which is computed exactly as before. Therefore, the
+            probability of resumption starts at 0.7 and goes down as memory usage rises. Conversely, the
+            probability that we will stay suspended starts at 0.3 and goes up from there.
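+
+            A minimal standalone sketch of that decision (not the StochasticSpaceTimeMutatorScheduler
+            class itself; the 0.3 defaults are the pauseScale and minimumPause values quoted above):
+
+                #include <algorithm>
+                #include <random>
+
+                struct StochasticPauseSketch {
+                    double pauseScale { 0.3 }; // multiplies the measured constraint-solving time
+                    double minimumPauseMs { 0.3 }; // floor on the suspended-draining slice
+                    std::mt19937 generator { std::random_device{}() };
+
+                    double targetPauseMs(double constraintSolvingMs) const
+                    {
+                        return std::max(constraintSolvingMs * pauseScale, minimumPauseMs);
+                    }
+
+                    // After the guaranteed targetPause of suspended draining, resume the mutator
+                    // with probability mutatorUtilization; otherwise stay suspended and drain more.
+                    bool shouldResumeMutator(double mutatorUtilization)
+                    {
+                        std::bernoulli_distribution resume(std::clamp(mutatorUtilization, 0.0, 1.0));
+                        return resume(generator);
+                    }
+                };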
+
+            This new scheduler looks like it might be a 25% improvement on splay-latency. It also looks like
+            a small progression on hash-map. Hash-map is a great test of one of the worst cases of retreating
+            wavefront, since it is repeatedly storing to a ginormous array. This array is sure to take a
+            while to scan, and to complete, the GC must be smart enough to visit any new objects it finds
+            while scanning the array immediately after scanning that array. This new scheduler means that
+            after scanning the array, the probability that you will scan whatever you found in it starts at
+            0.3 and rises as the program allocates. It's sure to be 0.3, and not 0.3^k, because after the
+            wavefront stops advancing, the only object on the mark stack after a constraint iteration will be
+            that array. Since there is sure to be a 0.3ms or longer pause, the GC will be sure to start
+            visiting this object. The GC can then complete if it just allows enough time after this to scan
+            whatever new objects it finds. If scanning the array overruns the deadline (and it almost
+            certainly will) then the probability that the GC keeps the mutator suspended is simply
+            1 - mutatorUtilization.
+
+            This scheduler is disabled by default. You can enable it with
+            --useStochasticMutatorScheduler=true.
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * heap/Heap.cpp:
+            (JSC::Heap::Heap):
+            (JSC::Heap::markToFixpoint):
+            * heap/Heap.h:
+            * heap/MarkingConstraintSet.cpp:
+            (JSC::MarkingConstraintSet::didStartMarking):
+            (JSC::MarkingConstraintSet::executeConvergenceImpl):
+            (JSC::MarkingConstraintSet::resetStats): Deleted.
+            (JSC::MarkingConstraintSet::executeBootstrap): Deleted.
+            * heap/MarkingConstraintSet.h:
+            * heap/MutatorScheduler.cpp:
+            (JSC::MutatorScheduler::didReachTermination):
+            (JSC::MutatorScheduler::synchronousDrainingDidStall):
+            * heap/MutatorScheduler.h:
+            * heap/SlotVisitor.cpp:
+            (JSC::SlotVisitor::didReachTermination):
+            (JSC::SlotVisitor::drainFromShared):
+            * heap/StochasticSpaceTimeMutatorScheduler.cpp: Added.
+            (JSC::StochasticSpaceTimeMutatorScheduler::Snapshot::Snapshot):
+            (JSC::StochasticSpaceTimeMutatorScheduler::Snapshot::now):
+            (JSC::StochasticSpaceTimeMutatorScheduler::Snapshot::bytesAllocatedThisCycle):
+            (JSC::StochasticSpaceTimeMutatorScheduler::StochasticSpaceTimeMutatorScheduler):
+            (JSC::StochasticSpaceTimeMutatorScheduler::~StochasticSpaceTimeMutatorScheduler):
+            (JSC::StochasticSpaceTimeMutatorScheduler::state):
+            (JSC::StochasticSpaceTimeMutatorScheduler::beginCollection):
+            (JSC::StochasticSpaceTimeMutatorScheduler::didStop):
+            (JSC::StochasticSpaceTimeMutatorScheduler::willResume):
+            (JSC::StochasticSpaceTimeMutatorScheduler::didReachTermination):
+            (JSC::StochasticSpaceTimeMutatorScheduler::didExecuteConstraints):
+            (JSC::StochasticSpaceTimeMutatorScheduler::synchronousDrainingDidStall):
+            (JSC::StochasticSpaceTimeMutatorScheduler::timeToStop):
+            (JSC::StochasticSpaceTimeMutatorScheduler::timeToResume):
+            (JSC::StochasticSpaceTimeMutatorScheduler::log):
+            (JSC::StochasticSpaceTimeMutatorScheduler::endCollection):
+            (JSC::StochasticSpaceTimeMutatorScheduler::setResumeTime):
+            (JSC::StochasticSpaceTimeMutatorScheduler::bytesAllocatedThisCycleImpl):
+            (JSC::StochasticSpaceTimeMutatorScheduler::bytesSinceBeginningOfCycle):
+            (JSC::StochasticSpaceTimeMutatorScheduler::maxHeadroom):
+            (JSC::StochasticSpaceTimeMutatorScheduler::headroomFullness):
+            (JSC::StochasticSpaceTimeMutatorScheduler::mutatorUtilization):
+            * heap/StochasticSpaceTimeMutatorScheduler.h: Added.
+            * runtime/Options.cpp:
+            (JSC::overrideDefaults):
+            * runtime/Options.h:
+
+2017-01-24  Matthew Hanson  
+
+        Merge r211065. rdar://problem/29784295
+
+    2017-01-23  Filip Pizlo  
+
+            SharedArrayBuffer plus WebGL should not equal CRASH
+            https://bugs.webkit.org/show_bug.cgi?id=167329
+
+            Reviewed by Saam Barati.
+
+            DOM unwrapping methods should return null rather than crashing. The code expects an
+            unshared buffer, so we should return null when it's shared. The caller can then decide
+            if they like null or not.
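+
+            A trivial standalone sketch of that contract (illustrative type, not the real
+            JSArrayBufferView API): unwrapping a shared view yields null instead of crashing, and
+            the caller chooses how to react:
+
+                struct ViewSketch { bool shared { false }; };
+
+                // Returns null for shared buffers; callers that require an unshared buffer can
+                // raise their own error instead of hitting an assertion-style crash.
+                static ViewSketch* toWrappedSketch(ViewSketch* view)
+                {
+                    if (!view || view->shared)
+                        return nullptr;
+                    return view;
+                }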
+
+            * runtime/JSArrayBufferViewInlines.h:
+            (JSC::JSArrayBufferView::toWrapped):
+
+2017-01-24  Matthew Hanson  
+
+        Merge r211043. rdar://problem/30134434
+
+    2017-01-23  Michael Saboff  
+
+            IntlObject uses JSArray::tryCreateUninitialized in an unsafe way
+            https://bugs.webkit.org/show_bug.cgi?id=167288
+
+            Reviewed by Filip Pizlo.
+
+            Refactored the following "create" methods into a "tryCreate" method and a
+            "create" wrapper: JSArray::create(), Butterfly::create() and
+            createArrayButterfly().
+
+            Changed IntlObject.cpp to use JSArray::tryCreate() as it is simpler to use
+            by not requiring the caller to be GC savvy.  The performance benefits of
+            tryCreateUninitialized() are not needed by the IntlObject C++ code.
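+
+            A standalone sketch of the tryCreate/create split (illustrative type, not the real
+            Butterfly or JSArray signatures): the try variant reports failure with null, and the
+            plain variant keeps crash-on-failure behaviour for callers that cannot handle null:
+
+                #include <cstdlib>
+
+                struct ArrayStorageSketch {
+                    static ArrayStorageSketch* tryCreate(unsigned length)
+                    {
+                        if (length > 0x10000000) // stand-in for an allocation that can fail
+                            return nullptr;
+                        return new ArrayStorageSketch;
+                    }
+
+                    static ArrayStorageSketch* create(unsigned length)
+                    {
+                        ArrayStorageSketch* result = tryCreate(length);
+                        if (!result)
+                            std::abort(); // the non-try wrapper treats failure as fatal
+                        return result;
+                    }
+                };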
+
+            Did not add a new test as the bug caused LayoutTests/js/intl.html to fail
+            reliably with the JSC option values scribbleFreeCells=true,
+            collectContinuously=true and JSC_useGenerationalGC=false.
+
+            * runtime/Butterfly.h:
+            * runtime/ButterflyInlines.h:
+            (JSC::Butterfly::tryCreate): Added.
+            (JSC::Butterfly::create):
+            * runtime/IntlObject.cpp:
+            (JSC::canonicalizeLocaleList):
+            (JSC::lookupSupportedLocales):
+            (JSC::intlObjectFuncGetCanonicalLocales):
+            * runtime/JSArray.h:
+            (JSC::createContiguousArrayButterfly): Deleted.
+            (JSC::tryCreateArrayButterfly): Added.
+            (JSC::createArrayButterfly):
+            (JSC::JSArray::tryCreate): Added.
+            (JSC::JSArray::create):
+
+2017-01-24  Matthew Hanson  
+
+        Merge r210971. rdar://problem/30115838
+
+    2017-01-20  Saam Barati  
+
+            We should flash a safepoint before each DFG/FTL phase
+            https://bugs.webkit.org/show_bug.cgi?id=167234
+
+            Reviewed by Filip Pizlo.
+
+            The recent GC changes caused us to regress Kraken because of a
+            longstanding issue that happened to be hit with higher frequency because
+            of a change in timing between when a particular GC was happening and
+            when a particular FTL compilation was happening. The regression was caused
+            by the GC waiting for a large function to make it through the DFG portion
+            of an FTL compilation. This was taking 20ms-30ms and started happening during a
+            particular test with much higher frequency.
+
+            This means that anytime the GC waited for this compilation, the test ran at least
+            ~20ms slower, because while the GC waits for the compiler threads, the mutator is stopped.
+
+            It's good that we have such an easily reproducible case of this performance
+            issue because it will affect many real JS programs, especially ones with
+            large functions that get hot.
+
+            The most straight forward solution to fix this is to flash a safepoint before
+            each phase, allowing the GC to suspend the compiler if needed. In my testing,
+            this progresses Kraken in the browser, and doesn't regress anything else. This
+            solution also makes the most sense. I did some analysis on the compilation time
+            of this function that took ~20-30ms to pass through the DFG phases, and
+            the phase times were mostly evenly distributed. Some took longer than others,
+            but no phase was longer than 3ms. Most were in the 0.25ms to 1.5ms range.
+
+            * dfg/DFGPlan.cpp:
+            (JSC::DFG::Plan::compileInThreadImpl):
+            * dfg/DFGSafepoint.cpp:
+            (JSC::DFG::Safepoint::begin):
+            * runtime/Options.h:
+
+2017-01-20  Matthew Hanson  
+
+        Merge r210949. rdar://problem/30108531
+
+    2017-01-19  Chris Dumez  
+
+            iterable<> should be enabled on WK1
+            https://bugs.webkit.org/show_bug.cgi?id=167221
+            
+
+            Reviewed by Youenn Fablet.
+
+            * runtime/CommonIdentifiers.h:
+
+2017-01-20  Matthew Hanson  
+
+        Merge r210947. rdar://problem/30108809
+
+    2017-01-19  Filip Pizlo  
+
+            Structure::pin() needs to be called while holding a lock
+            https://bugs.webkit.org/show_bug.cgi?id=167220
+
+            Reviewed by Saam Barati.
+
+            Imagine this race: the mutator calls pin() and the collector calls visitChildren(),
+            on the same Structure at the same time. In trunk pin() does not require a lock to be
+            held and it doesn't grab any locks. Meanwhile visitChildren() grabs the lock, checks
+            if the structure is pinned, and if not, it removes it by overwriting with zero. Now
+            imagine how this plays out when pin() runs. Since pin() grabs no locks, it is
+            irrelevant that visitChildren() grabs any locks. So, visitChildren() might check if
+            the table is pinned before pin() pins it, and then clear the table after it was
+            already pinned.
+
+            The problem here is that pin() should be holding a lock. We could either make pin()
+            grab that lock by itself, or do what this patch does and make the caller grab the lock.
+            This is great because it means that sometimes we don't have to introduce any new
+            locking.
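+
+            A standalone model of the race and the fix (std::mutex stands in for the concurrent-JS
+            lock; this is not the Structure code): the pin and the collector's clear-if-unpinned
+            step run under the same lock, with the caller-held-lock style this patch prefers:
+
+                #include <mutex>
+
+                struct PinnableTableSketch {
+                    std::mutex lock;
+                    bool pinned { false };
+                    void* table { nullptr };
+
+                    // Mutator side: the caller already holds 'lock'; the parameter documents that.
+                    void pinWhileHoldingLock(const std::lock_guard<std::mutex>&)
+                    {
+                        pinned = true;
+                    }
+
+                    // Collector side: cannot interleave with the pin, so it can no longer observe
+                    // "unpinned" and then clear the table after it was pinned.
+                    void visitChildrenSketch()
+                    {
+                        std::lock_guard<std::mutex> holder(lock);
+                        if (!pinned)
+                            table = nullptr;
+                    }
+                };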
+
+            This fixes a materializePropertyTable() checkOffsetConsistency() crash that happens
+            very rarely, but I was able to get it to reproduce with run-webkit-tests and
+            aggressive GC settings.
+
+            * runtime/ConcurrentJSLock.h:
+            * runtime/Structure.cpp:
+            (JSC::Structure::materializePropertyTable):
+            (JSC::Structure::changePrototypeTransition):
+            (JSC::Structure::attributeChangeTransition):
+            (JSC::Structure::toDictionaryTransition):
+            (JSC::Structure::nonPropertyTransition):
+            (JSC::Structure::pin):
+            (JSC::Structure::pinForCaching):
+            (JSC::Structure::add):
+            * runtime/Structure.h:
+            * runtime/StructureInlines.h:
+            (JSC::Structure::checkOffsetConsistency):
+            (JSC::Structure::add):
+            (JSC::Structure::addPropertyWithoutTransition):
+
+2017-01-20  Matthew Hanson  
+
+        Merge r210935. rdar://problem/30101860
+
+    2017-01-19  Filip Pizlo  
+
+            The mutator needs to fire a barrier after memmoving stuff around in an object that the GC scans
+            https://bugs.webkit.org/show_bug.cgi?id=167208
+
+            Reviewed by Saam Barati.
+
+            It used to be that if you moved a value from one place to another in the same object
+            then there is no need for a barrier because the generational GC would have no need to
+            know that some old object still continues to refer to the same other old object.
+
+            But the concurrent GC might scan that object as the mutator moves pointers around in
+            it. If the ordering is right, this could mean that the collector never sees some of
+            those pointers. This can be fixed by adding a barrier.
+
+            This fixes the most obvious cases I found. There may be more and I'll continue to
+            audit. Most of the other memmove users seem to already use some kind of synchronization
+            to prevent this. For example, this can also be fixed by just holding the cell lock
+            around the memmove since we're dealing with indexing storage and the GC reads that
+            under the cell lock.
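+
+            A standalone sketch of the two safe options named above (illustrative members, not the
+            JSC helpers): either take the same lock the scanner takes around the memmove, or move
+            first and then request a barrier so the collector revisits the object:
+
+                #include <cstring>
+                #include <mutex>
+
+                struct IndexedStorageSketch {
+                    std::mutex cellLock; // the concurrent scanner takes this before reading slots
+                    void* slots[16] = {};
+                    bool barrierRequested { false }; // stand-in for firing a write barrier
+
+                    void shiftHoldingLock(size_t from, size_t count)
+                    {
+                        std::lock_guard<std::mutex> holder(cellLock);
+                        std::memmove(slots, slots + from, count * sizeof(void*));
+                    }
+
+                    void shiftWithBarrier(size_t from, size_t count)
+                    {
+                        std::memmove(slots, slots + from, count * sizeof(void*));
+                        barrierRequested = true; // collector will re-scan, so no pointer is missed
+                    }
+                };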
+
+            * runtime/JSArray.cpp:
+            (JSC::JSArray::shiftCountWithAnyIndexingType):
+            (JSC::JSArray::unshiftCountWithAnyIndexingType):
+
+2017-01-18  Matthew Hanson  
+
+        Merge r210858. rdar://problem/30069096
+
+    2017-01-18  Filip Pizlo  
+
+            JSObjectSetPrivate should not use jsCast<>
+            rdar://problem/30069096
+
+            Reviewed by Keith Miller.
+
+            * API/JSObjectRef.cpp:
+            (JSObjectSetPrivate):
+
+2017-01-18  Matthew Hanson  
+
+        Merge r210844. rdar://problem/29993906
+
+    2017-01-16  Filip Pizlo  
+
+            Make opaque root scanning truly constraint-based
+            https://bugs.webkit.org/show_bug.cgi?id=165760
+
+            Reviewed by Geoffrey Garen.
+
+            We have bugs when visitChildren() changes its mind about what opaque root to add, since
+            we don't have barriers on opaque roots. This supposedly once worked for generational GC,
+            and I started adding more barriers to support concurrent GC. But I think that the real
+            bug here is that we want the JSObject->OpaqueRoot to be evaluated as a constraint that
+            participates in the fixpoint. I like to think of this as an *output* constraint, because it
+            is concerned with outgoing edges in the heap from the object that registered the constraint.
+            An *input* constraint is like what Weak<> does when deciding whether the thing it points to
+            should be live.
+
+            Whether or not an object has output constraints depends on its type. So, we want the GC to
+            have a feature where we rapidly call some function on all marked objects of some type.
+
+            It's easy to rapidly scan all marked objects in a MarkedBlock. So, we want to allocate all
+            objects that have output constraints in their own MarkedBlocks and we want to track the set
+            of MarkedBlocks with output constraints.
+
+            This patch makes it easy to have clients of JSC's internal C++ APIs create a Subspace - like
+            what we used to call MarkedSpace::Subspace but now it's in the JSC namespace - which is
+            a collection of objects that you can easily scan during GC from a MarkingConstraint. It's
+            now possible for internal C++ API clients to register their own MarkingConstraints. The DOM
+            now uses this to create two Subspaces (more on why two below) and it calls
+            JSCell::visitOutputConstraints() on all of the marked objects in those subspaces using a new
+            MarkingConstraint. That MarkingConstraint uses a new style of volatility, called
+            SeldomGreyed, which is like GreyedByExecution except it is opportunistically not executed
+            as roots in the hopes that their sole execution will be the snapshot-at-the-end. I also
+            converted the CodeBlock rescan constraint to SeldomGreyed, since that's also an output
+            constraint.
+
+            This patch also uses Subspace for something pretty obvious: knowing how to call the
+            destructor. Subspaces can specialize the sweep for their way of invoking destructors. We
+            have the following subspaces:
+
+            - auxiliary
+            - cell
+            - destructibleCell - for JSCell subclasses that have destructors and StructureIsImmortal
+            - stringSpace - inlines ~JSString into the sweep, making string allocation 7% faster
+            - destructibleObjectSpace - for JSDestructibleObject subclasses
+
+            And WebCore adds:
+
+            - outputConstraint - for JSDOMObjects that have a visitAdditionalChildren
+            - globalObjectOutputConstraint - for JSDOMGlobalObjects that have a visitAdditionalChildren,
+              since JSDOMGlobalObjects are not JSDestructibleObjects
+
+            The Subspace for a type is selected by saying JSC::subspaceFor<Type>(vm). This calls
+            Type::subspaceFor<Type>(vm). This allows cell classes to override subspaceFor<> and it
+            allows any subspaceFor<> implementation to query static flags in the type. This is how
+            JSCell::subspaceFor<> can select either cellSpace or destructibleCellSpace.
+
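+            For illustration, here is a standalone sketch of that dispatch (illustrative names and
+            an int standing in for Subspace*, not the real JSC declarations): the free function
+            forwards to a static member template, so a class can pick its space from its own
+            static flags:
+
+                struct VMSpacesSketch {
+                    int cellSpace { 1 };
+                    int destructibleCellSpace { 2 };
+                };
+
+                struct CellSketch {
+                    static constexpr bool needsDestruction = false;
+                    template<typename CellType>
+                    static int subspaceFor(VMSpacesSketch& vm)
+                    {
+                        return CellType::needsDestruction ? vm.destructibleCellSpace : vm.cellSpace;
+                    }
+                };
+
+                struct DestructibleCellSketch : CellSketch {
+                    static constexpr bool needsDestruction = true;
+                };
+
+                template<typename Type>
+                int subspaceForSketch(VMSpacesSketch& vm)
+                {
+                    return Type::template subspaceFor<Type>(vm); // lets Type override the choice
+                }
+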
+            This patch is mostly about:
+
+            - Moving MarkedSpace::Subspace out of MarkedSpace and making it a nice class with a nice
+              API. Almost all of its functionality is just taken out of MarkedSpace.
+            - Converting users of the old API for allocating objects and getting MarkedAllocators, like
+              heap.allocatorForObjectWithoutDestructor() and its friends. That would now say
+              vm.cellSpace.allocatorFor().
+
+            Altogether, this means that we only have a small regression on Dromaeo. The regression is
+            due to the fact that we scan output constraints. Before the Subspace optimizations (see
+            r209766, which was rolled out in r209812), this regression on Dromaeo/jslib was 2x but after
+            the optimizations in this patch it's only 1.12x. Note that Dromaeo/jslib creates gigabytes of
+            DOM nodes. Compared to web pages, this is a very extreme synthetic microbenchmark. Still, we
+            like optimizing these because we don't want to presume what web pages will look like.
+
+            The use of Subspaces to specialize destructors happened not because it's super necessary but
+            because I wanted to introduce a single unified way of communicating to the GC how to treat
+            different types. Any Subspace feature that allowed us to collect some types together would
+            have to be mindful of the destructorness of objects. I could have turned this into a
+            liability where each Subspace has two subsubspaces - one for destructor objects and one for
+            non-destructor objects, which would have allowed me to keep the old sweep specialization
+            code. Just days prior, mlam wanted to do something that was hard because of that old sweep
+            specializer, so I decided to take the opportunity to fix the sweep specializer while also
+            making Subspace be the one true way of teaching the GC about types. To validate that this
+            actually does things, I added a JSStringSubspace and a test that shows that this is a 7%
+            string allocation progression.
+
+            In bug 167066, I'm getting rid of the rest of the code in JSC that would special-case for
+            JSDestructibleObject vs StructureIsImmortal by using the GC's DestructionMode. After that,
+            Subspace will be the only mechanism by which JSC uses the GC to encode types.
+
+            Prior to this change, having multiple MarkedSpace::Subspaces would have been expensive
+            because they create a bunch of MarkedAllocators upfront. We now have the ability to create
+            MarkedAllocators lazily. We create them on the first allocation from that size class or when
+            a JIT asks for the MarkedAllocator. The concurrent JITs can ask for MarkedAllocators because
+            their creation is under a lock.
+
+            On my machine, this might be a 1.1% JetStream speed-up with 87% confidence and it might be
+            a 0.4% PLT3 slow-down with 92% confidence. Note that 0.4% on PLT3 is the level of systematic
+            error on PLT3 on my computer: I've seen definite 0.4% speed-ups and slow-downs that were not
+            confirmed by any bot. Let's see what the bots say.
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * bytecode/ObjectAllocationProfile.h:
+            (JSC::ObjectAllocationProfile::initialize):
+            * bytecode/PolymorphicAccess.cpp:
+            (JSC::AccessCase::generateImpl):
+            * dfg/DFGSpeculativeJIT.cpp:
+            (JSC::DFG::SpeculativeJIT::emitAllocateRawObject):
+            (JSC::DFG::SpeculativeJIT::compileMakeRope):
+            (JSC::DFG::SpeculativeJIT::compileAllocatePropertyStorage):
+            (JSC::DFG::SpeculativeJIT::compileReallocatePropertyStorage):
+            (JSC::DFG::SpeculativeJIT::compileNewTypedArray):
+            (JSC::DFG::SpeculativeJIT::emitAllocateButterfly):
+            * dfg/DFGSpeculativeJIT64.cpp:
+            (JSC::DFG::SpeculativeJIT::compile):
+            * ftl/FTLAbstractHeapRepository.h:
+            * ftl/FTLLowerDFGToB3.cpp:
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewTypedArray):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMakeRope):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeNewObject):
+            (JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorageWithSizeImpl):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateObject):
+            (JSC::FTL::DFG::LowerDFGToB3::allocatorForSize):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateVariableSizedObject):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateVariableSizedCell):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateJSArray):
+            * heap/AllocatorAttributes.h:
+            (JSC::AllocatorAttributes::AllocatorAttributes):
+            * heap/ConstraintVolatility.h: Added.
+            (WTF::printInternal):
+            * heap/GCActivityCallback.cpp:
+            * heap/Heap.cpp:
+            (JSC::Heap::Heap):
+            (JSC::Heap::lastChanceToFinalize):
+            (JSC::Heap::markToFixpoint):
+            (JSC::Heap::updateObjectCounts):
+            (JSC::Heap::collectAllGarbage):
+            (JSC::Heap::collectInThread):
+            (JSC::Heap::stopTheWorld):
+            (JSC::Heap::updateAllocationLimits):
+            (JSC::Heap::bytesVisited):
+            (JSC::Heap::addCoreConstraints):
+            (JSC::Heap::addMarkingConstraint):
+            (JSC::Heap::notifyIsSafeToCollect):
+            (JSC::Heap::preventCollection):
+            (JSC::Heap::allowCollection):
+            (JSC::Heap::setMutatorShouldBeFenced):
+            (JSC::Heap::buildConstraintSet): Deleted.
+            (JSC::Heap::writeBarrierOpaqueRootSlow): Deleted.
+            (JSC::Heap::addMutatorShouldBeFencedCache): Deleted.
+            * heap/Heap.h:
+            (JSC::Heap::mutatorExecutionVersion):
+            (JSC::Heap::numOpaqueRoots):
+            (JSC::Heap::vm): Deleted.
+            (JSC::Heap::subspaceForObjectWithoutDestructor): Deleted.
+            (JSC::Heap::subspaceForObjectDestructor): Deleted.
+            (JSC::Heap::subspaceForAuxiliaryData): Deleted.
+            (JSC::Heap::allocatorForObjectWithoutDestructor): Deleted.
+            (JSC::Heap::allocatorForObjectWithDestructor): Deleted.
+            (JSC::Heap::allocatorForAuxiliaryData): Deleted.
+            * heap/HeapInlines.h:
+            (JSC::Heap::vm):
+            (JSC::Heap::allocateWithDestructor): Deleted.
+            (JSC::Heap::allocateWithoutDestructor): Deleted.
+            (JSC::Heap::allocateObjectOfType): Deleted.
+            (JSC::Heap::subspaceForObjectOfType): Deleted.
+            (JSC::Heap::allocatorForObjectOfType): Deleted.
+            (JSC::Heap::allocateAuxiliary): Deleted.
+            (JSC::Heap::tryAllocateAuxiliary): Deleted.
+            (JSC::Heap::tryReallocateAuxiliary): Deleted.
+            (JSC::Heap::ascribeOwner): Deleted.
+            (JSC::Heap::writeBarrierOpaqueRoot): Deleted.
+            * heap/LargeAllocation.cpp:
+            (JSC::LargeAllocation::tryCreate):
+            (JSC::LargeAllocation::LargeAllocation):
+            (JSC::LargeAllocation::~LargeAllocation):
+            (JSC::LargeAllocation::sweep):
+            * heap/LargeAllocation.h:
+            * heap/MarkedAllocator.cpp:
+            (JSC::MarkedAllocator::MarkedAllocator):
+            (JSC::MarkedAllocator::tryAllocateWithoutCollecting):
+            (JSC::MarkedAllocator::tryAllocateIn):
+            (JSC::MarkedAllocator::allocateSlowCaseImpl):
+            (JSC::MarkedAllocator::tryAllocateBlock):
+            (JSC::MarkedAllocator::shrink):
+            (JSC::MarkedAllocator::markedSpace):
+            * heap/MarkedAllocator.h:
+            (JSC::MarkedAllocator::nextAllocatorInSubspace):
+            (JSC::MarkedAllocator::setNextAllocatorInSubspace):
+            (JSC::MarkedAllocator::subspace):
+            (JSC::MarkedAllocator::tryAllocate): Deleted.
+            (JSC::MarkedAllocator::allocate): Deleted.
+            (JSC::MarkedAllocator::forEachBlock): Deleted.
+            * heap/MarkedAllocatorInlines.h: Added.
+            (JSC::MarkedAllocator::tryAllocate):
+            (JSC::MarkedAllocator::allocate):
+            (JSC::MarkedAllocator::forEachBlock):
+            (JSC::MarkedAllocator::forEachNotEmptyBlock):
+            * heap/MarkedBlock.cpp:
+            (JSC::MarkedBlock::Handle::subspace):
+            (JSC::MarkedBlock::Handle::sweep):
+            (JSC::MarkedBlock::Handle::specializedSweep): Deleted.
+            (JSC::MarkedBlock::Handle::sweepHelperSelectScribbleMode): Deleted.
+            (JSC::MarkedBlock::Handle::sweepHelperSelectEmptyMode): Deleted.
+            (JSC::MarkedBlock::Handle::sweepHelperSelectHasNewlyAllocated): Deleted.
+            (JSC::MarkedBlock::Handle::sweepHelperSelectSweepMode): Deleted.
+            (JSC::MarkedBlock::Handle::sweepHelperSelectMarksMode): Deleted.
+            * heap/MarkedBlock.h:
+            (JSC::MarkedBlock::Handle::visitWeakSet):
+            * heap/MarkedBlockInlines.h:
+            (JSC::MarkedBlock::Handle::isNewlyAllocatedStale):
+            (JSC::MarkedBlock::Handle::hasAnyNewlyAllocated):
+            (JSC::MarkedBlock::heap):
+            (JSC::MarkedBlock::space):
+            (JSC::MarkedBlock::Handle::space):
+            (JSC::MarkedBlock::Handle::specializedSweep):
+            (JSC::MarkedBlock::Handle::finishSweepKnowingSubspace):
+            (JSC::MarkedBlock::Handle::sweepDestructionMode):
+            (JSC::MarkedBlock::Handle::emptyMode):
+            (JSC::MarkedBlock::Handle::scribbleMode):
+            (JSC::MarkedBlock::Handle::newlyAllocatedMode):
+            (JSC::MarkedBlock::Handle::marksMode):
+            (JSC::MarkedBlock::Handle::forEachMarkedCell):
+            * heap/MarkedSpace.cpp:
+            (JSC::MarkedSpace::initializeSizeClassForStepSize):
+            (JSC::MarkedSpace::MarkedSpace):
+            (JSC::MarkedSpace::lastChanceToFinalize):
+            (JSC::MarkedSpace::addMarkedAllocator):
+            (JSC::MarkedSpace::allocate): Deleted.
+            (JSC::MarkedSpace::tryAllocate): Deleted.
+            (JSC::MarkedSpace::allocateLarge): Deleted.
+            (JSC::MarkedSpace::tryAllocateLarge): Deleted.
+            * heap/MarkedSpace.h:
+            (JSC::MarkedSpace::heap):
+            (JSC::MarkedSpace::allocatorLock):
+            (JSC::MarkedSpace::subspaceForObjectsWithDestructor): Deleted.
+            (JSC::MarkedSpace::subspaceForObjectsWithoutDestructor): Deleted.
+            (JSC::MarkedSpace::subspaceForAuxiliaryData): Deleted.
+            (JSC::MarkedSpace::allocatorFor): Deleted.
+            (JSC::MarkedSpace::destructorAllocatorFor): Deleted.
+            (JSC::MarkedSpace::auxiliaryAllocatorFor): Deleted.
+            (JSC::MarkedSpace::allocateWithoutDestructor): Deleted.
+            (JSC::MarkedSpace::allocateWithDestructor): Deleted.
+            (JSC::MarkedSpace::allocateAuxiliary): Deleted.
+            (JSC::MarkedSpace::tryAllocateAuxiliary): Deleted.
+            (JSC::MarkedSpace::forEachSubspace): Deleted.
+            * heap/MarkingConstraint.cpp:
+            (JSC::MarkingConstraint::MarkingConstraint):
+            * heap/MarkingConstraint.h:
+            (JSC::MarkingConstraint::volatility):
+            * heap/MarkingConstraintSet.cpp:
+            (JSC::MarkingConstraintSet::resetStats):
+            (JSC::MarkingConstraintSet::add):
+            (JSC::MarkingConstraintSet::executeConvergenceImpl):
+            * heap/MarkingConstraintSet.h:
+            * heap/SlotVisitor.cpp:
+            (JSC::SlotVisitor::visitChildren):
+            (JSC::SlotVisitor::visitAsConstraint):
+            (JSC::SlotVisitor::drain):
+            (JSC::SlotVisitor::addOpaqueRoot):
+            (JSC::SlotVisitor::mergeIfNecessary):
+            (JSC::SlotVisitor::mergeOpaqueRootsIfNecessary): Deleted.
+            * heap/SlotVisitor.h:
+            (JSC::SlotVisitor::setIgnoreNewOpaqueRoots):
+            * heap/SlotVisitorInlines.h:
+            (JSC::SlotVisitor::reportExtraMemoryVisited):
+            (JSC::SlotVisitor::reportExternalMemoryVisited):
+            * heap/Subspace.cpp: Added.
+            (JSC::Subspace::Subspace):
+            (JSC::Subspace::~Subspace):
+            (JSC::Subspace::finishSweep):
+            (JSC::Subspace::destroy):
+            (JSC::Subspace::allocate):
+            (JSC::Subspace::tryAllocate):
+            (JSC::Subspace::allocatorForSlow):
+            (JSC::Subspace::allocateSlow):
+            (JSC::Subspace::tryAllocateSlow):
+            * heap/Subspace.h: Added.
+            (JSC::Subspace::tryAllocatorFor):
+            (JSC::Subspace::allocatorFor):
+            * heap/SubspaceInlines.h: Added.
+            (JSC::Subspace::forEachMarkedBlock):
+            (JSC::Subspace::forEachNotEmptyMarkedBlock):
+            (JSC::Subspace::forEachLargeAllocation):
+            (JSC::Subspace::forEachMarkedCell):
+            * heap/WeakBlock.cpp:
+            (JSC::WeakBlock::specializedVisit):
+            * heap/WeakBlock.h:
+            * heap/WeakSet.h:
+            (JSC::WeakSet::visit):
+            * jit/AssemblyHelpers.h:
+            (JSC::AssemblyHelpers::emitAllocateJSObjectWithKnownSize):
+            (JSC::AssemblyHelpers::emitAllocateVariableSized):
+            (JSC::AssemblyHelpers::emitAllocateVariableSizedCell):
+            * jit/JITOpcodes.cpp:
+            (JSC::JIT::emit_op_new_object):
+            * jsc.cpp:
+            * runtime/ButterflyInlines.h:
+            (JSC::Butterfly::createUninitialized):
+            (JSC::Butterfly::growArrayRight):
+            * runtime/ClassInfo.h:
+            * runtime/ClonedArguments.cpp:
+            (JSC::ClonedArguments::createEmpty):
+            * runtime/DirectArguments.cpp:
+            (JSC::DirectArguments::overrideThings):
+            * runtime/GenericArgumentsInlines.h:
+            (JSC::GenericArguments::initModifiedArgumentsDescriptor):
+            * runtime/HashMapImpl.h:
+            (JSC::HashMapBuffer::create):
+            * runtime/JSArray.cpp:
+            (JSC::JSArray::tryCreateUninitialized):
+            (JSC::JSArray::unshiftCountSlowCase):
+            * runtime/JSArrayBufferView.cpp:
+            (JSC::JSArrayBufferView::ConstructionContext::ConstructionContext):
+            * runtime/JSCell.h:
+            (JSC::subspaceFor):
+            * runtime/JSCellInlines.h:
+            (JSC::JSCell::visitOutputConstraints):
+            (JSC::JSCell::subspaceFor):
+            (JSC::allocateCell):
+            * runtime/JSDestructibleObject.h:
+            (JSC::JSDestructibleObject::subspaceFor):
+            * runtime/JSDestructibleObjectSubspace.cpp: Added.
+            (JSC::JSDestructibleObjectSubspace::JSDestructibleObjectSubspace):
+            (JSC::JSDestructibleObjectSubspace::~JSDestructibleObjectSubspace):
+            (JSC::JSDestructibleObjectSubspace::finishSweep):
+            (JSC::JSDestructibleObjectSubspace::destroy):
+            * runtime/JSDestructibleObjectSubspace.h: Added.
+            * runtime/JSObject.h:
+            (JSC::JSObject::JSObject):
+            * runtime/JSObjectInlines.h:
+            * runtime/JSSegmentedVariableObject.h:
+            * runtime/JSString.h:
+            (JSC::JSString::subspaceFor):
+            * runtime/JSStringSubspace.cpp: Added.
+            (JSC::JSStringSubspace::JSStringSubspace):
+            (JSC::JSStringSubspace::~JSStringSubspace):
+            (JSC::JSStringSubspace::finishSweep):
+            (JSC::JSStringSubspace::destroy):
+            * runtime/JSStringSubspace.h: Added.
+            * runtime/RegExpMatchesArray.h:
+            (JSC::tryCreateUninitializedRegExpMatchesArray):
+            * runtime/VM.cpp:
+            (JSC::VM::VM):
+            * runtime/VM.h:
+
+2017-01-18  Matthew Hanson  
+
+        Merge r210829. rdar://problem/30044439
+
+    2017-01-16  Filip Pizlo  
+
+            JSCell::classInfo() shouldn't have a bunch of mitigations for being called during destruction
+            https://bugs.webkit.org/show_bug.cgi?id=167066
+
+            Reviewed by Keith Miller and Michael Saboff.
+
+            This reduces the size of JSCell::classInfo() by half and removes some checks that
+            this function previously had to do in case it was called from destructors.
+
+            I changed all of the destructors so that they don't call JSCell::classInfo() and I
+            added an assertion to JSCell::classInfo() to catch cases where someone called it
+            from a destructor accidentally.
+
+            This means that we only have one place in destruction that needs to know the class:
+            the sweeper's call to the destructor.
+
+            One of the trickiest outcomes of this is the need to support inherits() tests in
+            JSObjectGetPrivate(), when it is called from the destructor callback on the object
+            being destructed. JSObjectGetPrivate() is undefined behavior anyway if you use it
+            on any dead-but-not-destructed object other than the one being destructed right
+            now. The purpose of the inherits() tests is to distinguish between different kinds
+            of CallbackObjects, which may have different kinds of base classes. I think that
+            this was always subtly wrong - for example, if the object being destructed is a
+            JSGlobalObject then it's not a DestructibleObject, is not in a destructor block,
+            but does not have an immortal Structure - so classInfo() is not valid. This fixes
+            the issue by having ~JSCallbackObject know its classInfo. It now stashes its
+            classInfo in VM so that JSObjectGetPrivate can use that classInfo if it detects
+            that it's being used on a currently-destructing object.
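+
+            A standalone model of that stash (illustrative types, not the real VM or
+            JSCallbackObject members): the destructor records which object is dying and what its
+            class is, so an API helper invoked from the finalize callback can still answer
+            class questions for that one object:
+
+                struct ClassSketch { const char* name; };
+
+                struct VMStashSketch {
+                    const void* currentlyDestructing { nullptr };
+                    const ClassSketch* currentlyDestructingClass { nullptr };
+                };
+
+                struct CallbackObjectSketch {
+                    VMStashSketch& vm;
+                    const ClassSketch& classInfo;
+
+                    ~CallbackObjectSketch()
+                    {
+                        vm.currentlyDestructing = this;
+                        vm.currentlyDestructingClass = &classInfo;
+                        // ... the client's finalize callback would run here ...
+                        vm.currentlyDestructing = nullptr;
+                        vm.currentlyDestructingClass = nullptr;
+                    }
+                };
+
+                // What a JSObjectGetPrivate-style helper can do while destruction is in flight.
+                static const ClassSketch* classOfSketch(VMStashSketch& vm, const void* object,
+                    const ClassSketch* normalAnswer)
+                {
+                    if (vm.currentlyDestructing == object)
+                        return vm.currentlyDestructingClass;
+                    return normalAnswer;
+                }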
+
+            That was the only really weird part of this patch. The rest is mostly removing
+            illegal uses of jsCast<> in destructors. There were a few other genuine uses of
+            classInfo() but they were in code that already knew how to get its classInfo()
+            using other means:
+
+            - You can still say structure()->classInfo(), and I use this form in code that
+              knows that its StructureIsImmortal.
+
+            - You can use this->classInfo() if it's overridden, like in subclasses of
+              JSDestructibleObject.
+
+            Rolling this back in because I think I fixed the crashes.
+
+            * API/JSAPIWrapperObject.mm:
+            (JSAPIWrapperObjectHandleOwner::finalize):
+            * API/JSCallbackObject.h:
+            * API/JSCallbackObjectFunctions.h:
+            (JSC::JSCallbackObject::~JSCallbackObject):
+            (JSC::JSCallbackObject::init):
+            * API/JSObjectRef.cpp:
+            (classInfoPrivate):
+            (JSObjectGetPrivate):
+            (JSObjectSetPrivate):
+            * bytecode/EvalCodeBlock.cpp:
+            (JSC::EvalCodeBlock::destroy):
+            * bytecode/FunctionCodeBlock.cpp:
+            (JSC::FunctionCodeBlock::destroy):
+            * bytecode/ModuleProgramCodeBlock.cpp:
+            (JSC::ModuleProgramCodeBlock::destroy):
+            * bytecode/ProgramCodeBlock.cpp:
+            (JSC::ProgramCodeBlock::destroy):
+            * bytecode/UnlinkedEvalCodeBlock.cpp:
+            (JSC::UnlinkedEvalCodeBlock::destroy):
+            * bytecode/UnlinkedFunctionCodeBlock.cpp:
+            (JSC::UnlinkedFunctionCodeBlock::destroy):
+            * bytecode/UnlinkedFunctionExecutable.cpp:
+            (JSC::UnlinkedFunctionExecutable::destroy):
+            * bytecode/UnlinkedModuleProgramCodeBlock.cpp:
+            (JSC::UnlinkedModuleProgramCodeBlock::destroy):
+            * bytecode/UnlinkedProgramCodeBlock.cpp:
+            (JSC::UnlinkedProgramCodeBlock::destroy):
+            * heap/CodeBlockSet.cpp:
+            (JSC::CodeBlockSet::lastChanceToFinalize):
+            (JSC::CodeBlockSet::deleteUnmarkedAndUnreferenced):
+            * heap/MarkedAllocator.cpp:
+            (JSC::MarkedAllocator::allocateSlowCaseImpl):
+            * heap/MarkedBlock.cpp:
+            (JSC::MarkedBlock::Handle::sweep):
+            * jit/JITThunks.cpp:
+            (JSC::JITThunks::finalize):
+            * runtime/AbstractModuleRecord.cpp:
+            (JSC::AbstractModuleRecord::destroy):
+            * runtime/ExecutableBase.cpp:
+            (JSC::ExecutableBase::clearCode):
+            * runtime/JSCellInlines.h:
+            (JSC::JSCell::classInfo):
+            (JSC::JSCell::callDestructor):
+            * runtime/JSLock.h:
+            (JSC::JSLock::ownerThread):
+            * runtime/JSModuleNamespaceObject.cpp:
+            (JSC::JSModuleNamespaceObject::destroy):
+            * runtime/JSModuleRecord.cpp:
+            (JSC::JSModuleRecord::destroy):
+            * runtime/JSPropertyNameEnumerator.cpp:
+            (JSC::JSPropertyNameEnumerator::destroy):
+            * runtime/JSSegmentedVariableObject.h:
+            * runtime/SymbolTable.cpp:
+            (JSC::SymbolTable::destroy):
+            * runtime/VM.h:
+            * wasm/js/JSWebAssemblyCallee.cpp:
+            (JSC::JSWebAssemblyCallee::destroy):
+            * wasm/js/WebAssemblyModuleRecord.cpp:
+            (JSC::WebAssemblyModuleRecord::destroy):
+            * wasm/js/WebAssemblyToJSCallee.cpp:
+            (JSC::WebAssemblyToJSCallee::WebAssemblyToJSCallee):
+            (JSC::WebAssemblyToJSCallee::destroy):
+
+2017-01-18  Matthew Hanson  
+
+        Merge r210745. rdar://problem/30019309
+
+    2017-01-13  Saam Barati  
+
+            Initialize the ArraySpecies watchpoint as Clear and transition to IsWatched once slice is called for the first time
+            https://bugs.webkit.org/show_bug.cgi?id=167017
+            
+
+            Reviewed by Keith Miller and Filip Pizlo.
+
+            This patch is to reverse the JSBench regression from r210695.
+
+            The new state diagram for the array species watchpoint is as
+            follows (a small standalone model is sketched after the list):
+
+            1. On GlobalObject construction, it starts life out as ClearWatchpoint.
+            2. When slice is called for the first time, we observe the state
+            of the world, and either transition it to IsWatched if we were able
+            to set up the object property conditions, or to IsInvalidated if we
+            were not.
+            3. The DFG compiler will now only lower slice as an intrinsic if
+            it observed the speciesWatchpoint.state() as IsWatched.
+            4. The IsWatched => IsInvalidated transition happens only when
+            one of the object property condition watchpoints fires.
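+
+            For illustration, a minimal standalone model of those transitions (an enum stands in
+            for the real watchpoint machinery):
+
+                #include <cassert>
+
+                enum class SpeciesStateSketch { Clear, Watched, Invalidated };
+
+                struct GlobalObjectSketch {
+                    SpeciesStateSketch state { SpeciesStateSketch::Clear };
+
+                    void firstSliceCall(bool conditionsCouldBeSetUp)
+                    {
+                        if (state != SpeciesStateSketch::Clear)
+                            return;
+                        state = conditionsCouldBeSetUp ? SpeciesStateSketch::Watched
+                                                       : SpeciesStateSketch::Invalidated;
+                    }
+
+                    bool dfgMayInlineSlice() const { return state == SpeciesStateSketch::Watched; }
+                    void watchpointFired() { state = SpeciesStateSketch::Invalidated; }
+                };
+
+                int main()
+                {
+                    GlobalObjectSketch global;
+                    assert(!global.dfgMayInlineSlice()); // Clear: do not lower slice as an intrinsic
+                    global.firstSliceCall(true);
+                    assert(global.dfgMayInlineSlice()); // Watched
+                    global.watchpointFired();
+                    assert(!global.dfgMayInlineSlice()); // Invalidated, and it stays that way
+                    return 0;
+                }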
+
+            * dfg/DFGByteCodeParser.cpp:
+            (JSC::DFG::ByteCodeParser::handleIntrinsicCall):
+            * runtime/ArrayPrototype.cpp:
+            (JSC::speciesWatchpointIsValid):
+            (JSC::speciesConstructArray):
+            (JSC::arrayProtoPrivateFuncConcatMemcpy):
+            (JSC::ArrayPrototype::tryInitializeSpeciesWatchpoint):
+            (JSC::ArrayPrototype::initializeSpeciesWatchpoint): Deleted.
+            * runtime/ArrayPrototype.h:
+            * runtime/JSGlobalObject.cpp:
+            (JSC::JSGlobalObject::JSGlobalObject):
+            (JSC::JSGlobalObject::init):
+
+2017-01-18  Matthew Hanson  
+
+        Merge r210695. rdar://problem/29913445
+
+    2017-01-12  Saam Barati  
+
+            Add a slice intrinsic to the DFG/FTL
+            https://bugs.webkit.org/show_bug.cgi?id=166707
+            
+
+            Reviewed by Filip Pizlo.
+
+            The gist of this patch is to inline Array.prototype.slice
+            into the DFG/FTL. The implementation in the DFG-backend
+            and FTLLowerDFGToB3 is just a straight forward implementation
+            of what the C function is doing. The more interesting bits
+            of this patch are setting up the proper watchpoints and conditions
+            in the executing code to prove that its safe to skip all of the
+            observable JS actions that Array.prototype.slice normally does.
+
+            We perform the following proofs:
+            1. Array.prototype.constructor has not changed (via a watchpoint).
+            2. That Array.prototype.constructor[Symbol.species] has not changed (via a watchpoint).
+            3. The global object is not having a bad time.
+            4. The array that is being sliced has an original array structure.
+            5. Array.prototype/Object.prototype have not transitioned.
+
+            Conditions 1, 2, and 3 are strictly required.
+
+            4 is ensuring a couple things:
+            1. That a "constructor" property hasn't been added to the array
+            we're slicing since we're supposed to perform a Get(array, "constructor").
+            2. That we're not slicing an instance of a subclass of Array.
+
+            We could relax 4.1 in the future if we find other ways to test if
+            the incoming array hasn't changed the "constructor" property. We
+            would probably use TryGetById to do this.
+
+            I'm seeing a 5% speedup on crypto-pbkdf2 and often a 1% speedup on
+            the total benchmark (the results are sometimes noisy).
+
+            * dfg/DFGAbstractInterpreterInlines.h:
+            (JSC::DFG::AbstractInterpreter::executeEffects):
+            * dfg/DFGByteCodeParser.cpp:
+            (JSC::DFG::ByteCodeParser::handleIntrinsicCall):
+            * dfg/DFGCallArrayAllocatorSlowPathGenerator.h:
+            (JSC::DFG::CallArrayAllocatorWithVariableStructureVariableSizeSlowPathGenerator::CallArrayAllocatorWithVariableStructureVariableSizeSlowPathGenerator):
+            * dfg/DFGClobberize.h:
+            (JSC::DFG::clobberize):
+            * dfg/DFGDoesGC.cpp:
+            (JSC::DFG::doesGC):
+            * dfg/DFGFixupPhase.cpp:
+            (JSC::DFG::FixupPhase::fixupNode):
+            * dfg/DFGNodeType.h:
+            * dfg/DFGPredictionPropagationPhase.cpp:
+            * dfg/DFGSafeToExecute.h:
+            (JSC::DFG::safeToExecute):
+            * dfg/DFGSpeculativeJIT.cpp:
+            (JSC::DFG::SpeculativeJIT::compileArraySlice):
+            (JSC::DFG::SpeculativeJIT::emitAllocateButterfly):
+            * dfg/DFGSpeculativeJIT.h:
+            * dfg/DFGSpeculativeJIT32_64.cpp:
+            (JSC::DFG::SpeculativeJIT::compile):
+            (JSC::DFG::SpeculativeJIT::emitInitializeButterfly):
+            (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
+            * dfg/DFGSpeculativeJIT64.cpp:
+            (JSC::DFG::SpeculativeJIT::compile):
+            (JSC::DFG::SpeculativeJIT::emitInitializeButterfly):
+            (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
+            * ftl/FTLAbstractHeapRepository.h:
+            * ftl/FTLCapabilities.cpp:
+            (JSC::FTL::canCompile):
+            * ftl/FTLLowerDFGToB3.cpp:
+            (JSC::FTL::DFG::LowerDFGToB3::compileNode):
+            (JSC::FTL::DFG::LowerDFGToB3::compileArraySlice):
+            (JSC::FTL::DFG::LowerDFGToB3::compileNewArrayWithSize):
+            (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeNewObject):
+            (JSC::FTL::DFG::LowerDFGToB3::initializeArrayElements):
+            (JSC::FTL::DFG::LowerDFGToB3::storeStructure):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateCell):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateObject):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateJSArray):
+            (JSC::FTL::DFG::LowerDFGToB3::allocateUninitializedContiguousJSArray):
+            * jit/AssemblyHelpers.cpp:
+            (JSC::AssemblyHelpers::emitLoadStructure):
+            * runtime/ArrayPrototype.cpp:
+            (JSC::ArrayPrototype::finishCreation):
+            (JSC::speciesWatchpointIsValid):
+            (JSC::speciesConstructArray):
+            (JSC::arrayProtoFuncSlice):
+            (JSC::arrayProtoPrivateFuncConcatMemcpy):
+            (JSC::ArrayPrototype::initializeSpeciesWatchpoint):
+            (JSC::ArrayPrototypeAdaptiveInferredPropertyWatchpoint::handleFire):
+            (JSC::speciesWatchpointsValid): Deleted.
+            (JSC::ArrayPrototype::attemptToInitializeSpeciesWatchpoint): Deleted.
+            * runtime/ArrayPrototype.h:
+            (JSC::ArrayPrototype::speciesWatchpointStatus): Deleted.
+            (): Deleted.
+            * runtime/Intrinsic.h:
+            * runtime/JSGlobalObject.cpp:
+            (JSC::JSGlobalObject::JSGlobalObject):
+            (JSC::JSGlobalObject::init):
+            * runtime/JSGlobalObject.h:
+            (JSC::JSGlobalObject::arraySpeciesWatchpoint):
+            * runtime/Structure.h:
+
+2017-01-18  Matthew Hanson  
+
+        Merge r210837. rdar://problem/29432371
+
+    2017-01-17  Michael Saboff  
+
+            Nested parenthesized regular expressions with non-zero minimum counts appear to hang and use lots of memory
+            https://bugs.webkit.org/show_bug.cgi?id=167125
+
+            Reviewed by Filip Pizlo.
+
+            Changed Yarr to handle nested parenthesized subexpressions where the minimum count is
+            not 0 directly in the Yarr interpreter.  Previously we'd factor an expression like
+            (a|b)+ into (a|b)(a|b)* with special handling for captures.  This factoring was done
+            using a deep copy that doubled the size of the resulting expression for each nested
+            parenthesized subexpression.  Now the Yarr interpreter can directly process a regexp
+            like (a|b){2,42}.
+
+            The parser will allow one level of nested, non-zero minimum, counted parentheses using
+            the old copy method.  After one level, it will generate parentheses terms with a non-zero
+            minimum.  Such an expression wasn't handled by the Yarr JIT before the change, so this
+            change isn't a performance regression.
+
+            Added a minimum count to the YarrPattern and ByteTerm classes, and then factored that
+            minimum into the interpreter.  A non-zero minimum is only handled by the Yarr interpreter.
+            If the Yarr JIT sees such a term, it punts back to the interpreter.
+
+            * yarr/YarrInterpreter.cpp:
+            (JSC::Yarr::Interpreter::backtrackPatternCharacter):
+            (JSC::Yarr::Interpreter::backtrackPatternCasedCharacter):
+            (JSC::Yarr::Interpreter::matchCharacterClass):
+            (JSC::Yarr::Interpreter::backtrackCharacterClass):
+            (JSC::Yarr::Interpreter::matchBackReference):
+            (JSC::Yarr::Interpreter::backtrackBackReference):
+            (JSC::Yarr::Interpreter::matchParenthesesOnceBegin):
+            (JSC::Yarr::Interpreter::matchParenthesesOnceEnd):
+            (JSC::Yarr::Interpreter::backtrackParenthesesOnceBegin):
+            (JSC::Yarr::Interpreter::backtrackParenthesesOnceEnd):
+            (JSC::Yarr::Interpreter::matchParenthesesTerminalBegin):
+            (JSC::Yarr::Interpreter::backtrackParenthesesTerminalBegin):
+            (JSC::Yarr::Interpreter::matchParentheticalAssertionBegin):
+            (JSC::Yarr::Interpreter::matchParentheticalAssertionEnd):
+            (JSC::Yarr::Interpreter::backtrackParentheticalAssertionBegin):
+            (JSC::Yarr::Interpreter::backtrackParentheticalAssertionEnd):
+            (JSC::Yarr::Interpreter::matchParentheses):
+            (JSC::Yarr::Interpreter::backtrackParentheses):
+            (JSC::Yarr::Interpreter::matchDisjunction):
+            (JSC::Yarr::ByteCompiler::atomPatternCharacter):
+            (JSC::Yarr::ByteCompiler::atomCharacterClass):
+            (JSC::Yarr::ByteCompiler::atomBackReference):
+            (JSC::Yarr::ByteCompiler::atomParentheticalAssertionEnd):
+            (JSC::Yarr::ByteCompiler::atomParenthesesSubpatternEnd):
+            (JSC::Yarr::ByteCompiler::atomParenthesesOnceEnd):
+            (JSC::Yarr::ByteCompiler::atomParenthesesTerminalEnd):
+            (JSC::Yarr::ByteCompiler::emitDisjunction):
+            * yarr/YarrInterpreter.h:
+            (JSC::Yarr::ByteTerm::ByteTerm):
+            * yarr/YarrJIT.cpp:
+            (JSC::Yarr::YarrGenerator::generatePatternCharacterOnce):
+            (JSC::Yarr::YarrGenerator::generatePatternCharacterFixed):
+            (JSC::Yarr::YarrGenerator::generatePatternCharacterGreedy):
+            (JSC::Yarr::YarrGenerator::backtrackPatternCharacterNonGreedy):
+            (JSC::Yarr::YarrGenerator::generateCharacterClassFixed):
+            (JSC::Yarr::YarrGenerator::generateCharacterClassGreedy):
+            (JSC::Yarr::YarrGenerator::backtrackCharacterClassNonGreedy):
+            (JSC::Yarr::YarrGenerator::generateTerm):
+            (JSC::Yarr::YarrGenerator::backtrackTerm):
+            (JSC::Yarr::YarrGenerator::generate):
+            (JSC::Yarr::YarrGenerator::backtrack):
+            (JSC::Yarr::YarrGenerator::opCompileParenthesesSubpattern):
+            * yarr/YarrPattern.cpp:
+            (JSC::Yarr::YarrPatternConstructor::copyTerm):
+            (JSC::Yarr::YarrPatternConstructor::quantifyAtom):
+            (JSC::Yarr::YarrPatternConstructor::checkForTerminalParentheses):
+            (JSC::Yarr::YarrPattern::YarrPattern):
+            * yarr/YarrPattern.h:
+            (JSC::Yarr::PatternTerm::PatternTerm):
+            (JSC::Yarr::PatternTerm::quantify):
+            (JSC::Yarr::YarrPattern::reset):
+
+2017-01-13  Matthew Hanson  
+
+        Merge r210694. rdar://problem/29983526
+
+    2017-01-12  Saam Barati  
+
+            Concurrent GC has a bug where we would detect a race but fail to rescan the object
+            https://bugs.webkit.org/show_bug.cgi?id=166960
+            
+
+            Reviewed by Filip Pizlo and Mark Lam.
+
+            We have code like this in JSC:
+
+            ```
+            Butterfly* butterfly = allocateMoreOutOfLineStorage(vm, oldOutOfLineCapacity, newOutOfLineCapacity);
+            nukeStructureAndSetButterfly(vm, structureID, butterfly);
+            structure->setLastOffset(newLastOffset);
+            WTF::storeStoreFence();
+            setStructureIDDirectly(structureID);
+            ```
+
+            Note that the collector could detect a race here, which sometimes
+            incorrectly caused us to not visit the object again.
+
+            Mutator Thread: M, Collector Thread: C, assuming sequential consistency via
+            proper barriers:
+
+            M: allocate new butterfly
+            M: Set nuked structure ID
+            M: Set butterfly (this does a barrier)
+            C: Start scanning O
+            C: load structure ID
+            C: See it's nuked and bail (we used to rely on a write barrier to rescan).
+
+            We sometimes never rescanned here because we were calling
+            setStructureIDDirectly which doesn't do a write barrier.
+            (Note, the places that do this but call setStructure were
+            OK because setStructure will perform a write barrier.)
+
+            (This same issue also existed in places where the collector thread
+            detected races for Structure::m_offset, but places that changed
+            Structure::m_offset didn't perform a write barrier on the object
+            after changing its Structure's m_offset.)
+
+            To prevent such code from requiring every call site to perform
+            a write barrier on the object, I've changed the collector code
+            to keep a stack of cells to be revisited due to races. This stack
+            is then consulted when we do marking. Because such races are rare,
+            we have a single stack on Heap that is guarded by a lock.
+
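+            A minimal sketch of the revisit idea, with invented names (the real
+            implementation lives on Heap and SlotVisitor and uses WTF types):
+
+            ```
+            #include <mutex>
+            #include <vector>
+
+            struct Cell;
+
+            // Hypothetical stand-in for the lock-guarded stack of cells whose scan
+            // raced with a mutator transition; the marker drains it on a later pass
+            // instead of relying on a write barrier that may never fire.
+            class RaceMarkStack {
+            public:
+                void add(Cell* cell)
+                {
+                    std::lock_guard<std::mutex> locker(m_lock);
+                    m_cells.push_back(cell);
+                }
+
+                template<typename Functor>
+                void drain(const Functor& revisit)
+                {
+                    std::vector<Cell*> cells;
+                    {
+                        std::lock_guard<std::mutex> locker(m_lock);
+                        cells.swap(m_cells);
+                    }
+                    for (Cell* cell : cells)
+                        revisit(cell); // rescan now that the transition has completed
+                }
+
+            private:
+                std::mutex m_lock;
+                std::vector<Cell*> m_cells;
+            };
+            ```
+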
+            * heap/Heap.cpp:
+            (JSC::Heap::Heap):
+            (JSC::Heap::~Heap):
+            (JSC::Heap::markToFixpoint):
+            (JSC::Heap::endMarking):
+            (JSC::Heap::buildConstraintSet):
+            (JSC::Heap::addToRaceMarkStack):
+            * heap/Heap.h:
+            (JSC::Heap::collectorSlotVisitor):
+            (JSC::Heap::mutatorMarkStack): Deleted.
+            * heap/SlotVisitor.cpp:
+            (JSC::SlotVisitor::didRace):
+            * heap/SlotVisitor.h:
+            (JSC::SlotVisitor::didRace):
+            (JSC::SlotVisitor::didNotRace): Deleted.
+            * heap/SlotVisitorInlines.h:
+            (JSC::SlotVisitor::didNotRace): Deleted.
+            * runtime/JSObject.cpp:
+            (JSC::JSObject::visitButterfly):
+            (JSC::JSObject::visitButterflyImpl):
+            * runtime/JSObjectInlines.h:
+            (JSC::JSObject::prepareToPutDirectWithoutTransition):
+            * runtime/Structure.cpp:
+            (JSC::Structure::flattenDictionaryStructure):
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210609. rdar://problem/27896585
+
+    2017-01-11  Andreas Kling  
+
+            Crash when WebCore's GC heap grows way too large.
+            
+            
+
+            Reviewed by Mark Lam.
+
+            Add a simple API to JSC::Heap that allows setting a hard limit on the amount
+            of live bytes. If this is exceeded, we crash with a recognizable signature.
+            By default there is no limit.
+
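+            A tiny sketch of the check, with invented names (not the actual Heap
+            code):
+
+            ```
+            #include <cstddef>
+            #include <cstdlib>
+
+            // Illustrative only: after recomputing live bytes, crash deliberately if
+            // a configured hard cap has been exceeded. A cap of 0 means "no limit",
+            // which is the default.
+            void checkMaxLiveSize(std::size_t liveBytes, std::size_t maxLiveSize)
+            {
+                if (maxLiveSize && liveBytes > maxLiveSize)
+                    std::abort(); // recognizable, immediate crash
+            }
+            ```
+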
+            * heap/Heap.cpp:
+            (JSC::Heap::didExceedMaxLiveSize):
+            (JSC::Heap::updateAllocationLimits):
+            * heap/Heap.h:
+            (JSC::Heap::setMaxLiveSize):
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210565. rdar://problem/29942167
+
+    2017-01-09  Filip Pizlo  
+
+            Streamline the GC barrier slowpath
+            https://bugs.webkit.org/show_bug.cgi?id=166878
+
+            Reviewed by Geoffrey Garen and Saam Barati.
+
+            This implements two optimizations to the barrier:
+
+            - Removes the write barrier buffer. This was just overhead.
+
+            - Teaches the slow path how to white an object that was black but unmarked, ensuring that
+              we don't take the slow path for this object again (see the sketch below).
+
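+            A rough sketch of the whitening idea, using a simplified state model with
+            invented names (the real code uses CellState and is concurrency-aware):
+
+            ```
+            // Illustrative only: in the barrier slow path, an object that looks black
+            // but is not marked in the current cycle is turned back to white, so later
+            // stores to it take the fast path instead of re-entering the slow path.
+            enum class Color { White, Black };
+
+            struct CellLike {
+                Color color { Color::White };
+                bool markedThisCycle { false };
+            };
+
+            void barrierSlowPath(CellLike& cell)
+            {
+                if (cell.color == Color::Black && !cell.markedThisCycle) {
+                    cell.color = Color::White;
+                    return;
+                }
+                // Otherwise the object really is live and marked: remember it so the
+                // collector revisits its outgoing references.
+            }
+            ```
+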
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * dfg/DFGSpeculativeJIT.cpp:
+            (JSC::DFG::SpeculativeJIT::compileStoreBarrier):
+            * ftl/FTLLowerDFGToB3.cpp:
+            (JSC::FTL::DFG::LowerDFGToB3::emitStoreBarrier):
+            * heap/CellState.h:
+            * heap/Heap.cpp:
+            (JSC::Heap::Heap):
+            (JSC::Heap::markToFixpoint):
+            (JSC::Heap::addToRememberedSet):
+            (JSC::Heap::stopTheWorld):
+            (JSC::Heap::writeBarrierSlowPath):
+            (JSC::Heap::buildConstraintSet):
+            (JSC::Heap::flushWriteBarrierBuffer): Deleted.
+            * heap/Heap.h:
+            (JSC::Heap::writeBarrierBuffer): Deleted.
+            * heap/SlotVisitor.cpp:
+            (JSC::SlotVisitor::appendJSCellOrAuxiliary):
+            (JSC::SlotVisitor::setMarkedAndAppendToMarkStack):
+            (JSC::SlotVisitor::appendToMarkStack):
+            (JSC::SlotVisitor::visitChildren):
+            * heap/WriteBarrierBuffer.cpp: Removed.
+            * heap/WriteBarrierBuffer.h: Removed.
+            * jit/JITOperations.cpp:
+            * jit/JITOperations.h:
+            * runtime/JSCellInlines.h:
+            (JSC::JSCell::JSCell):
+            * runtime/StructureIDBlob.h:
+            (JSC::StructureIDBlob::StructureIDBlob):
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210563. rdar://problem/29940224
+
+    2017-01-10  Mark Lam  
+
+            Property setters should not be called for bound arguments list entries.
+            https://bugs.webkit.org/show_bug.cgi?id=165631
+
+            Reviewed by Filip Pizlo.
+
+            * builtins/FunctionPrototype.js:
+            (bind):
+            - use @putByValDirect to set the bound arguments so that we don't consult the
+              prototype chain for setters.
+
+            * runtime/IntlDateTimeFormatPrototype.cpp:
+            (JSC::IntlDateTimeFormatPrototypeGetterFormat):
+            * runtime/IntlNumberFormatPrototype.cpp:
+            (JSC::IntlNumberFormatPrototypeGetterFormat):
+            - no need to create a bound arguments array because these bound functions bind
+              no arguments according to the spec.
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210553. rdar://problem/29941356
+
+    2017-01-09  Filip Pizlo  
+
+            JSArray has some object scanning races
+            https://bugs.webkit.org/show_bug.cgi?id=166874
+
+            Reviewed by Mark Lam.
+
+            This fixes two separate bugs, both of which I detected by running
+            array-splice-contiguous.js in extreme anger:
+
+            1) Some of the paths of shifting and unshifting were not grabbing the internal cell
+               lock. This was causing the array storage scan to crash, even though it was well
+               synchronized (the scan does hold the lock). The fix is just to hold the lock anywhere
+               that memmoves the innards of the butterfly (see the sketch after this list).
+
+            2) Out of line property scanning was synchronized using double collect snapshot. Array
+               storage scanning was synchronized using locks. But what if array storage
+               transformations messed up the out of line properties? It turns out that we actually
+               need to hoist the array storage scanner's locking up into the double collect
+               snapshot.
+
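+            A rough sketch of the fix for point (1), with a std::mutex standing in
+            for the internal cell lock and invented helper names:
+
+            ```
+            #include <cstddef>
+            #include <cstring>
+            #include <mutex>
+
+            // Illustrative only: every path that memmoves the innards of the butterfly
+            // takes the same lock the concurrent array-storage scan holds, so the
+            // collector never observes a half-moved backing store.
+            struct ArrayLike {
+                std::mutex cellLock; // stand-in for the JSCell-internal lock
+                double* storage { nullptr };
+            };
+
+            void shiftElements(ArrayLike& array, std::size_t to, std::size_t from, std::size_t count)
+            {
+                std::lock_guard<std::mutex> locker(array.cellLock);
+                std::memmove(array.storage + to, array.storage + from, count * sizeof(double));
+            }
+            ```
+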
+            I don't know how to write a test that does any better of a job of catching this than
+            array-splice-contiguous.js.
+
+            * heap/DeferGC.h: Make DisallowGC usable even if NDEBUG.
+            * runtime/JSArray.cpp:
+            (JSC::JSArray::unshiftCountSlowCase):
+            (JSC::JSArray::shiftCountWithArrayStorage):
+            (JSC::JSArray::unshiftCountWithArrayStorage):
+            * runtime/JSObject.cpp:
+            (JSC::JSObject::visitButterflyImpl):
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210530. rdar://problem/29909896
+
+    2017-01-09  Filip Pizlo  
+
+            Unreviewed, fix cloop.
+
+            * dfg/DFGPlanInlines.h:
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210521. rdar://problem/29909896
+
+    2017-01-08  Filip Pizlo  
+
+            Make the collector's fixpoint smart about scheduling work
+            https://bugs.webkit.org/show_bug.cgi?id=165910
+
+            Reviewed by Keith Miller.
+
+            Prior to this change, every time the GC would run any constraints in markToFixpoint, it
+            would run all of the constraints. It would always run them in the same order. That means
+            that so long as any one constraint was generating new work, we'd pay the price of all
+            constraints. This is usually OK because most constraints are cheap but it artificially
+            inflates the cost of slow constraints - especially ones that are expensive but usually
+            generate no new work.
+
+            This patch redoes how the GC runs constraints by applying ideas from data flow analysis.
+            The GC now builds a MarkingConstraintSet when it boots up, and this contains all of the
+            constraints as well as some meta-data about them. Now, markToFixpoint just calls into
+            MarkingConstraintSet to execute constraints. Because constraint execution and scheduling
+            need to be aware of each other, I rewrote markToFixpoint in such a way that it's more
+            obvious how the GC goes between constraint solving, marking with stopped mutator, and
+            marking with resumed mutator. This also changes the scheduler API in such a way that a
+            synchronous stop-the-world collection no longer needs to do fake stop/resume - instead we
+            just swap the space-time scheduler for the stop-the-world scheduler.
+
+            This is a big streamlining of the GC. This is a speed-up in GC-heavy tests because we
+            now execute most constraints exactly twice regardless of how many total fixpoint
+            iterations we do. Now, when we run out of marking work, the constraint solver will just
+            run the constraint that is most likely to generate new visiting work, and if it does
+            generate work, then the GC now goes back to marking. Before, it would run *all*
+            constraints and then go back to marking. The constraint solver is armed with three
+            information signals that it uses to sort the constraints in order of descending likelihood
+            to generate new marking work. Then it runs them in that order until there is new
+            marking work. The signals are:
+
+            1) Whether the constraint is greyed by marking or execution. We call this the volatility
+               of the constraint. For example, weak reference constraints have GreyedByMarking as
+               their volatility because they are most likely to have something to say after we've done
+               some marking. On the other hand, conservative roots have GreyedByExecution as their
+               volatility because they will give new information anytime we let the mutator run. The
+               constraint solver will only run GreyedByExecution constraints as roots and after the
+               GreyedByMarking constraints go silent. This ensures that we don't try to scan
+               conservative roots every time we need to re-run weak references and vice-versa.
+
+               Another way to look at it is that the constraint solver tries to predict if the
+               wavefront is advancing or retreating. The wavefront is almost certainly advancing so
+               long as the mark stacks are non-empty or so long as at least one of the GreyedByMarking
+               constraints is still producing work. Otherwise the wavefront is almost certainly
+               retreating. It's most profitable to run GreyedByMarking constraints when the wavefront
+               is advancing, and most profitable to run GreyedByExecution constraints when the
+               wavefront is retreating.
+
+               We use the predicted wavefront direction and the volatility of constraints as a
+               first-order signal of constraint profitability.
+
+            2) How much visiting work was created the last time the constraint ran. The solver
+               remembers the lastVisitCount, and uses it to predict how much work the constraint will
+               generate next time. In practice this means we will keep re-running the one interesting
+               constraint until it shuts up.
+
+            3) Optional work predictors for some constraints. The constraint that shuffles the mutator
+               mark stack into the main SlotVisitor's mutator mark stack always knows exactly how much
+               work it will create.
+
+               The sum of (2) and (3) are used as a second-order signal of constraint profitability.
+
+            The constraint solver will always run all of the GreyedByExecution constraints at GC
+            start, since these double as the GC's roots. The constraint solver will always run all of
+            the GreyedByMarking constraints the first time that marking stalls. Other than that, the
+            solver will keep running constraints, sorted according to their likelihood to create work,
+            until either work is created or we run out of constraints to run. GC termination happens
+            when we run out of constraints to run.
+
+            This new infrastructure means that we have a much better chance of dealing with worst-case
+            DOM pathologies. If we can intelligently factor different evil DOM things into different
+            constraints with the right work predictions then this could reduce the cost of those DOM
+            things by a factor of N where N is the number of fixpoint iterations the GC typically
+            does. N is usually around 5-6 even for simple heaps.
+
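+            A condensed sketch of the convergence step described above, with invented
+            names (the real MarkingConstraintSet also supports per-constraint work
+            predictors and visiting timeouts):
+
+            ```
+            #include <algorithm>
+            #include <cstddef>
+            #include <functional>
+            #include <vector>
+
+            // Illustrative only: each constraint remembers how much visiting work it
+            // produced last time. During convergence we sort by (predicted wavefront
+            // match, last visit count) and run constraints until one generates work.
+            struct Constraint {
+                std::function<std::size_t()> execute; // runs the constraint, returns visit count
+                std::size_t lastVisitCount { 0 };
+                bool greyedByMarking { false };       // false means greyed by execution
+            };
+
+            bool executeConvergence(std::vector<Constraint*>& constraints, bool wavefrontAdvancing)
+            {
+                std::stable_sort(constraints.begin(), constraints.end(),
+                    [&](Constraint* a, Constraint* b) {
+                        bool aPreferred = a->greyedByMarking == wavefrontAdvancing;
+                        bool bPreferred = b->greyedByMarking == wavefrontAdvancing;
+                        if (aPreferred != bPreferred)
+                            return aPreferred;                        // first-order signal
+                        return a->lastVisitCount > b->lastVisitCount; // second-order signal
+                    });
+                for (Constraint* constraint : constraints) {
+                    constraint->lastVisitCount = constraint->execute();
+                    if (constraint->lastVisitCount)
+                        return true; // new marking work: go back to draining mark stacks
+                }
+                return false; // nothing produced work: the GC may be able to terminate
+            }
+            ```
+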
+            My perf measurements say:
+
+            PLT3: 0.02% faster with 5.3% confidence.
+            JetStream: 0.15% faster with 17% confidence.
+            Speedometer: 0.58% faster with 82% confidence.
+
+            Here are the details from JetStream:
+
+            splay: 1.02173x faster with 0.996841 confidence
+            splay-latency: 1.0617x faster with 0.987462 confidence
+            towers.c: 1.01852x faster with 0.92128 confidence
+            crypto-md5: 1.06058x faster with 0.482363 confidence
+            score: 1.00152x faster with 0.16892 confidence
+
+            I think that Speedometer is legitimately benefiting from this change based on looking at
+            --logGC=true output. We are now spending less time reexecuting expensive constraints. I
+            think that JetStream/splay is also benefiting, because although the constraints it sees
+            are cheap, it spends 30% of its time in GC so even small improvements matter.
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * dfg/DFGPlan.cpp:
+            (JSC::DFG::Plan::markCodeBlocks): Deleted.
+            (JSC::DFG::Plan::rememberCodeBlocks): Deleted.
+            * dfg/DFGPlan.h:
+            * dfg/DFGPlanInlines.h: Added.
+            (JSC::DFG::Plan::iterateCodeBlocksForGC):
+            * dfg/DFGWorklist.cpp:
+            (JSC::DFG::Worklist::markCodeBlocks): Deleted.
+            (JSC::DFG::Worklist::rememberCodeBlocks): Deleted.
+            (JSC::DFG::rememberCodeBlocks): Deleted.
+            * dfg/DFGWorklist.h:
+            * dfg/DFGWorklistInlines.h: Added.
+            (JSC::DFG::iterateCodeBlocksForGC):
+            (JSC::DFG::Worklist::iterateCodeBlocksForGC):
+            * heap/CodeBlockSet.cpp:
+            (JSC::CodeBlockSet::writeBarrierCurrentlyExecuting): Deleted.
+            * heap/CodeBlockSet.h:
+            (JSC::CodeBlockSet::iterate): Deleted.
+            * heap/CodeBlockSetInlines.h:
+            (JSC::CodeBlockSet::iterate):
+            (JSC::CodeBlockSet::iterateCurrentlyExecuting):
+            * heap/Heap.cpp:
+            (JSC::Heap::Heap):
+            (JSC::Heap::iterateExecutingAndCompilingCodeBlocks):
+            (JSC::Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks):
+            (JSC::Heap::assertSharedMarkStacksEmpty):
+            (JSC::Heap::markToFixpoint):
+            (JSC::Heap::endMarking):
+            (JSC::Heap::collectInThread):
+            (JSC::Heap::stopIfNecessarySlow):
+            (JSC::Heap::acquireAccessSlow):
+            (JSC::Heap::collectIfNecessaryOrDefer):
+            (JSC::Heap::buildConstraintSet):
+            (JSC::Heap::notifyIsSafeToCollect):
+            (JSC::Heap::ResumeTheWorldScope::ResumeTheWorldScope): Deleted.
+            (JSC::Heap::ResumeTheWorldScope::~ResumeTheWorldScope): Deleted.
+            (JSC::Heap::harvestWeakReferences): Deleted.
+            (JSC::Heap::visitConservativeRoots): Deleted.
+            (JSC::Heap::visitCompilerWorklistWeakReferences): Deleted.
+            * heap/Heap.h:
+            * heap/MarkingConstraint.cpp: Added.
+            (JSC::MarkingConstraint::MarkingConstraint):
+            (JSC::MarkingConstraint::~MarkingConstraint):
+            (JSC::MarkingConstraint::resetStats):
+            (JSC::MarkingConstraint::execute):
+            * heap/MarkingConstraint.h: Added.
+            (JSC::MarkingConstraint::index):
+            (JSC::MarkingConstraint::abbreviatedName):
+            (JSC::MarkingConstraint::name):
+            (JSC::MarkingConstraint::lastVisitCount):
+            (JSC::MarkingConstraint::quickWorkEstimate):
+            (JSC::MarkingConstraint::workEstimate):
+            (JSC::MarkingConstraint::volatility):
+            * heap/MarkingConstraintSet.cpp: Added.
+            (JSC::MarkingConstraintSet::ExecutionContext::ExecutionContext):
+            (JSC::MarkingConstraintSet::ExecutionContext::didVisitSomething):
+            (JSC::MarkingConstraintSet::ExecutionContext::shouldTimeOut):
+            (JSC::MarkingConstraintSet::ExecutionContext::drain):
+            (JSC::MarkingConstraintSet::ExecutionContext::didExecute):
+            (JSC::MarkingConstraintSet::ExecutionContext::execute):
+            (JSC::MarkingConstraintSet::MarkingConstraintSet):
+            (JSC::MarkingConstraintSet::~MarkingConstraintSet):
+            (JSC::MarkingConstraintSet::resetStats):
+            (JSC::MarkingConstraintSet::add):
+            (JSC::MarkingConstraintSet::executeBootstrap):
+            (JSC::MarkingConstraintSet::executeConvergence):
+            (JSC::MarkingConstraintSet::isWavefrontAdvancing):
+            (JSC::MarkingConstraintSet::executeConvergenceImpl):
+            (JSC::MarkingConstraintSet::executeAll):
+            * heap/MarkingConstraintSet.h: Added.
+            (JSC::MarkingConstraintSet::isWavefrontRetreating):
+            * heap/MutatorScheduler.cpp: Added.
+            (JSC::MutatorScheduler::MutatorScheduler):
+            (JSC::MutatorScheduler::~MutatorScheduler):
+            (JSC::MutatorScheduler::didStop):
+            (JSC::MutatorScheduler::willResume):
+            (JSC::MutatorScheduler::didExecuteConstraints):
+            (JSC::MutatorScheduler::log):
+            (JSC::MutatorScheduler::shouldStop):
+            (JSC::MutatorScheduler::shouldResume):
+            * heap/MutatorScheduler.h: Added.
+            * heap/OpaqueRootSet.h:
+            (JSC::OpaqueRootSet::add):
+            * heap/SlotVisitor.cpp:
+            (JSC::SlotVisitor::visitAsConstraint):
+            (JSC::SlotVisitor::drain):
+            (JSC::SlotVisitor::didReachTermination):
+            (JSC::SlotVisitor::hasWork):
+            (JSC::SlotVisitor::drainFromShared):
+            (JSC::SlotVisitor::drainInParallelPassively):
+            (JSC::SlotVisitor::addOpaqueRoot):
+            * heap/SlotVisitor.h:
+            (JSC::SlotVisitor::addToVisitCount):
+            * heap/SpaceTimeMutatorScheduler.cpp: Copied from Source/JavaScriptCore/heap/SpaceTimeScheduler.cpp.
+            (JSC::SpaceTimeMutatorScheduler::Snapshot::Snapshot):
+            (JSC::SpaceTimeMutatorScheduler::Snapshot::now):
+            (JSC::SpaceTimeMutatorScheduler::Snapshot::bytesAllocatedThisCycle):
+            (JSC::SpaceTimeMutatorScheduler::SpaceTimeMutatorScheduler):
+            (JSC::SpaceTimeMutatorScheduler::~SpaceTimeMutatorScheduler):
+            (JSC::SpaceTimeMutatorScheduler::state):
+            (JSC::SpaceTimeMutatorScheduler::beginCollection):
+            (JSC::SpaceTimeMutatorScheduler::didStop):
+            (JSC::SpaceTimeMutatorScheduler::willResume):
+            (JSC::SpaceTimeMutatorScheduler::didExecuteConstraints):
+            (JSC::SpaceTimeMutatorScheduler::timeToStop):
+            (JSC::SpaceTimeMutatorScheduler::timeToResume):
+            (JSC::SpaceTimeMutatorScheduler::log):
+            (JSC::SpaceTimeMutatorScheduler::endCollection):
+            (JSC::SpaceTimeMutatorScheduler::bytesAllocatedThisCycleImpl):
+            (JSC::SpaceTimeMutatorScheduler::bytesSinceBeginningOfCycle):
+            (JSC::SpaceTimeMutatorScheduler::maxHeadroom):
+            (JSC::SpaceTimeMutatorScheduler::headroomFullness):
+            (JSC::SpaceTimeMutatorScheduler::mutatorUtilization):
+            (JSC::SpaceTimeMutatorScheduler::collectorUtilization):
+            (JSC::SpaceTimeMutatorScheduler::elapsedInPeriod):
+            (JSC::SpaceTimeMutatorScheduler::phase):
+            (JSC::SpaceTimeMutatorScheduler::shouldBeResumed):
+            (JSC::SpaceTimeScheduler::Decision::targetMutatorUtilization): Deleted.
+            (JSC::SpaceTimeScheduler::Decision::targetCollectorUtilization): Deleted.
+            (JSC::SpaceTimeScheduler::Decision::elapsedInPeriod): Deleted.
+            (JSC::SpaceTimeScheduler::Decision::phase): Deleted.
+            (JSC::SpaceTimeScheduler::Decision::shouldBeResumed): Deleted.
+            (JSC::SpaceTimeScheduler::Decision::timeToResume): Deleted.
+            (JSC::SpaceTimeScheduler::Decision::timeToStop): Deleted.
+            (JSC::SpaceTimeScheduler::SpaceTimeScheduler): Deleted.
+            (JSC::SpaceTimeScheduler::snapPhase): Deleted.
+            (JSC::SpaceTimeScheduler::currentDecision): Deleted.
+            * heap/SpaceTimeMutatorScheduler.h: Copied from Source/JavaScriptCore/heap/SpaceTimeScheduler.h.
+            (JSC::SpaceTimeScheduler::Decision::operator bool): Deleted.
+            * heap/SpaceTimeScheduler.cpp: Removed.
+            * heap/SpaceTimeScheduler.h: Removed.
+            * heap/SynchronousStopTheWorldMutatorScheduler.cpp: Added.
+            (JSC::SynchronousStopTheWorldMutatorScheduler::SynchronousStopTheWorldMutatorScheduler):
+            (JSC::SynchronousStopTheWorldMutatorScheduler::~SynchronousStopTheWorldMutatorScheduler):
+            (JSC::SynchronousStopTheWorldMutatorScheduler::state):
+            (JSC::SynchronousStopTheWorldMutatorScheduler::beginCollection):
+            (JSC::SynchronousStopTheWorldMutatorScheduler::timeToStop):
+            (JSC::SynchronousStopTheWorldMutatorScheduler::timeToResume):
+            (JSC::SynchronousStopTheWorldMutatorScheduler::endCollection):
+            * heap/SynchronousStopTheWorldMutatorScheduler.h: Added.
+            * heap/VisitingTimeout.h: Added.
+            (JSC::VisitingTimeout::VisitingTimeout):
+            (JSC::VisitingTimeout::visitCount):
+            (JSC::VisitingTimeout::didVisitSomething):
+            (JSC::VisitingTimeout::shouldTimeOut):
+            * runtime/Options.h:
+
+2017-01-12  Matthew Hanson  
+
+        Merge r210457. rdar://problem/27330808
+
+    2017-01-06  Michael Saboff  
+
+            @putByValDirect in Array.of and Array.from overwrites non-writable/configurable properties
+            https://bugs.webkit.org/show_bug.cgi?id=153486
+
+            Reviewed by Saam Barati.
+
+            Moved read only check in putDirect() to all paths.
+
+            * runtime/SparseArrayValueMap.cpp:
+            (JSC::SparseArrayValueMap::putDirect):
+
+2017-01-11  Matthew Hanson  
+
+        Merge r210451. rdar://problem/29909812
+
+    2016-12-30  Filip Pizlo  
+
+            DeferGC::~DeferGC should be super cheap
+            https://bugs.webkit.org/show_bug.cgi?id=166626
+
+            Reviewed by Saam Barati.
+
+            Right now, ~DeferGC requires running the collector's full collectIfNecessaryOrDefer()
+            hook, which is super big. Normally, that hook would only be called from GC slow paths,
+            so it ought to be possible to add complex logic to it. It benefits the GC algorithm to
+            make that code smart, not necessarily fast.
+
+            The right thing for it to do is to have ~DeferGC check a boolean to see if
+            collectIfNecessaryOrDefer() had previously deferred anything, and only call it if that
+            is true. That's what this patch does.
+
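+            A minimal sketch of the idea, with invented member names:
+
+            ```
+            // Illustrative only: ~DeferGC just decrements a counter; the heavyweight
+            // path runs only if collectIfNecessaryOrDefer() actually deferred work
+            // while deferral was in effect.
+            class HeapLike {
+            public:
+                void incrementDeferralDepth() { ++m_deferralDepth; }
+
+                void decrementDeferralDepthAndGCIfNeeded()
+                {
+                    --m_deferralDepth;
+                    if (!m_deferralDepth && m_didDeferGCWork)
+                        gcIfNeededSlow(); // rare: only when a GC request was deferred
+                }
+
+                void noteDeferredGCWork() { m_didDeferGCWork = true; } // set by the defer path
+
+            private:
+                void gcIfNeededSlow()
+                {
+                    m_didDeferGCWork = false;
+                    // ... run the full collectIfNecessaryOrDefer() logic here ...
+                }
+
+                unsigned m_deferralDepth { 0 };
+                bool m_didDeferGCWork { false };
+            };
+            ```
+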
+            Unfortunately, this means that we lose the collectAccordingToDeferGCProbability mode,
+            which we used for two tests. Since I could only see two tests that used this mode, I
+            felt that it was better to enhance the GC than to keep the tests. I filed bug 166627 to
+            bring back something like that mode.
+
+            Although this patch does make some paths faster, its real goal is to ensure that bug
+            165963 can add more logic to collectIfNecessaryOrDefer() without introducing a big
+            regression. Until then, I wouldn't be surprised if this patch was a progression, but I'm
+            not betting on it.
+
+            * heap/Heap.cpp:
+            (JSC::Heap::collectIfNecessaryOrDefer):
+            (JSC::Heap::decrementDeferralDepthAndGCIfNeededSlow):
+            (JSC::Heap::canCollect): Deleted.
+            (JSC::Heap::shouldCollectHeuristic): Deleted.
+            (JSC::Heap::shouldCollect): Deleted.
+            (JSC::Heap::collectAccordingToDeferGCProbability): Deleted.
+            (JSC::Heap::decrementDeferralDepthAndGCIfNeeded): Deleted.
+            * heap/Heap.h:
+            * heap/HeapInlines.h:
+            (JSC::Heap::incrementDeferralDepth):
+            (JSC::Heap::decrementDeferralDepth):
+            (JSC::Heap::decrementDeferralDepthAndGCIfNeeded):
+            (JSC::Heap::mayNeedToStop):
+            (JSC::Heap::stopIfNecessary):
+            * runtime/Options.h:
+
+2017-01-11  Matthew Hanson  
+
+        Merge r210398. rdar://problem/29229439
+
+    2017-01-05  Filip Pizlo  
+
+            AutomaticThread timeout shutdown leaves a small window where notify() would think that the thread is still running
+            https://bugs.webkit.org/show_bug.cgi?id=166742
+
+            Reviewed by Geoffrey Garen.
+
+            Update to new AutomaticThread API.
+
+            * dfg/DFGWorklist.cpp:
+
+2017-01-10  Matthew Hanson  
+
+        Rollout r210336. rdar://problem/29912353
+
+2017-01-09  Babak Shafiei  
+
+        Merge r210458. rdar://problem/29911919
+
+    2017-01-06  Mark Lam  
+
+            The ObjC API's JSVirtualMachine's map tables need to be guarded by a lock.
+            https://bugs.webkit.org/show_bug.cgi?id=166778
+            
+
+            Reviewed by Filip Pizlo.
+
+            Now that we have a concurrent GC, access to JSVirtualMachine's
+            m_externalObjectGraph and m_externalRememberedSet need to be guarded by a lock
+            since both the GC marker thread and the mutator thread may access them at the
+            same time.
+
+            * API/JSVirtualMachine.mm:
+            (-[JSVirtualMachine addExternalRememberedObject:]):
+            (-[JSVirtualMachine addManagedReference:withOwner:]):
+            (-[JSVirtualMachine removeManagedReference:withOwner:]):
+            (-[JSVirtualMachine externalDataMutex]):
+            (scanExternalObjectGraph):
+            (scanExternalRememberedSet):
+
+            * API/JSVirtualMachineInternal.h:
+            - Deleted externalObjectGraph method.  There's no need to expose this.
+
+2017-01-06  Matthew Hanson  
+
+        Revert the following merges as part of disabling Web Assembly. rdar://problem/29890343
+
+        rdar://problem/29735737
+        rdar://problem/29747874
+        rdar://problem/29758107
+        rdar://problem/29759741
+        rdar://problem/29760322
+        rdar://problem/29760326
+        rdar://problem/29760386
+        rdar://problem/29760621
+        rdar://problem/29762017
+        rdar://problem/29782821
+        rdar://problem/29782833
+        rdar://problem/29784532
+        rdar://problem/29791695
+        rdar://problem/29793220
+        rdar://problem/29793949
+        rdar://problem/29795709
+        rdar://problem/29803676
+        rdar://problem/29814999
+        rdar://problem/29815000
+        rdar://problem/29841541
+        rdar://problem/29844107
+        rdar://problem/29856455
+
+2017-01-06  Matthew Hanson  
+
+        Disable WebAssembly. rdar://problem/29890343
+
+        Landed on behalf of JF Bastien.
+
+        * runtime/Options.h:
+
+2017-01-06  Matthew Hanson  
+
+        Merge r210276. rdar://problem/28867002
+
+    2017-01-04  Saam Barati  
+
+            We don't properly handle exceptions inside the nativeCallTrampoline macro in the LLInt
+            https://bugs.webkit.org/show_bug.cgi?id=163720
+
+            Reviewed by Mark Lam.
+
+            In the LLInt, we were incorrectly doing the exception check after the call.
+            Before the exception check, we were unwinding to our caller's
+            frame under the assumption that our caller was always a JS frame.
+            This is incorrect, however, because our caller might be a C frame.
+            One way that it can be a C frame is when C calls to JS, and JS tail
+            calls to native. This patch fixes this bug by doing unwinding from
+            the native callee's frame instead of its caller's.
+
+            * llint/LowLevelInterpreter32_64.asm:
+            * llint/LowLevelInterpreter64.asm:
+
+2017-01-06  Matthew Hanson  
+
+        Merge r210259. rdar://problem/29856455
+
+    2017-01-03  JF Bastien  
+
+            REGRESSION (r210244): Release JSC Stress test failure: wasm.yaml/wasm/js-api/wasm-to-wasm.js.default-wasm
+            https://bugs.webkit.org/show_bug.cgi?id=166669
+            
+
+            Reviewed by Saam Barati.
+
+            Bug #165282 added wasm -> wasm calls, but caused crashes in
+            release builds because the pinned registers are also callee-saved
+            and were being clobbered. B3 didn't see itself clobbering them
+            when no memory was used, and therefore omitted a restore.
+
+            This was causing the C++ code in callWebAssemblyFunction to crash
+            because $r12 was 0, and it expected it to have its value prior to
+            the call.
+
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::createJSToWasmWrapper):
+
+2017-01-06  Matthew Hanson  
+
+        Merge r210221. rdar://problem/29449474
+
+    2017-01-01  Jeff Miller  
+
+            Update user-visible copyright strings to include 2017
+            https://bugs.webkit.org/show_bug.cgi?id=166278
+
+            Reviewed by Dan Bernstein.
+
+            * Info.plist:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210282. rdar://problem/29760326
+
+    2017-01-04  JF Bastien  
+
+            WebAssembly JS API: add Module.sections
+            https://bugs.webkit.org/show_bug.cgi?id=165159
+            
+
+            Reviewed by Mark Lam.
+
+            As described in: https://github.com/WebAssembly/design/blob/master/JS.md#webassemblymodulecustomsections
+
+            This was added for Emscripten, and is likely to be used soon.
+
+            * wasm/WasmFormat.h: custom sections are just name + bytes
+            * wasm/WasmModuleParser.cpp: parse them, instead of skipping over
+            * wasm/WasmModuleParser.h:
+            * wasm/js/WebAssemblyModulePrototype.cpp: construct the Array of
+            ArrayBuffer as described in the spec
+            (JSC::webAssemblyModuleProtoCustomSections):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210229. rdar://problem/29760322
+
+    2017-01-02  JF Bastien  
+
+            WebAssembly: handle and optimize wasm export → wasm import calls
+            https://bugs.webkit.org/show_bug.cgi?id=165282
+
+            Reviewed by Saam Barati.
+
+              - Add a new JSType for WebAssemblyFunction, and use it when creating its
+                structure. This is used to quickly detect from wasm whether the import
+                call is to another wasm module, or whether it's to JS.
+              - Generate two stubs from the import stub generator: one for wasm->JS and one
+                for wasm -> wasm. This is done at Module time. Which is called will only be
+                known at Instance time, once we've received the import object. We want to
+                avoid codegen at Instance time, so having both around is great.
+              - Restore the WebAssembly global state (VM top Instance, and pinned registers)
+                after call / call_indirect, and in the JS->wasm entry stub.
+              - Pinned registers are now a global thing, not per-Memory, because the wasm ->
+                wasm stubs are generated at Module time where we don't really have enough
+                information to do the right thing (doing so would generate too much code).
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * runtime/JSType.h: add WebAssemblyFunctionType as a JSType
+            * wasm/WasmB3IRGenerator.cpp: significantly rework how calls which
+            could be external work, and how we save / restore global state:
+            VM's top Instance, and pinned registers
+            (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+            (JSC::Wasm::getMemoryBaseAndSize):
+            (JSC::Wasm::restoreWebAssemblyGlobalState):
+            (JSC::Wasm::createJSToWasmWrapper):
+            (JSC::Wasm::parseAndCompile):
+            * wasm/WasmB3IRGenerator.h:
+            * wasm/WasmBinding.cpp:
+            (JSC::Wasm::materializeImportJSCell):
+            (JSC::Wasm::wasmToJS):
+            (JSC::Wasm::wasmToWasm): the main goal of this patch was adding this function
+            (JSC::Wasm::exitStubGenerator):
+            * wasm/WasmBinding.h:
+            * wasm/WasmFormat.h: Get rid of much of the function index space:
+            we already have all of its information elsewhere, and as-is it
+            provides no extra efficiency.
+            (JSC::Wasm::ModuleInformation::functionIndexSpaceSize):
+            (JSC::Wasm::ModuleInformation::isImportedFunctionFromFunctionIndexSpace):
+            (JSC::Wasm::ModuleInformation::signatureIndexFromFunctionIndexSpace):
+            * wasm/WasmFunctionParser.h:
+            (JSC::Wasm::FunctionParser::FunctionParser):
+            * wasm/WasmMemory.cpp: Add some logging.
+            (JSC::Wasm::Memory::dump): this was nice when debugging
+            (JSC::Wasm::Memory::makeString):
+            (JSC::Wasm::Memory::Memory):
+            (JSC::Wasm::Memory::~Memory):
+            (JSC::Wasm::Memory::grow):
+            * wasm/WasmMemory.h: don't use extra indirection, it wasn't
+            needed. Reorder some of the fields which are looked up at runtime
+            so they're more cache-friendly.
+            (JSC::Wasm::Memory::Memory):
+            (JSC::Wasm::Memory::mode):
+            (JSC::Wasm::Memory::offsetOfSize):
+            * wasm/WasmMemoryInformation.cpp: Pinned registers are now a
+            global thing for all of JSC, not a per-Memory thing
+            anymore. wasm->wasm calls are more complex otherwise: they have to
+            figure out how to bridge between the caller and callee's
+            special-snowflake pinning.
+            (JSC::Wasm::PinnedRegisterInfo::get):
+            (JSC::Wasm::PinnedRegisterInfo::PinnedRegisterInfo):
+            (JSC::Wasm::MemoryInformation::MemoryInformation):
+            * wasm/WasmMemoryInformation.h:
+            * wasm/WasmModuleParser.cpp:
+            * wasm/WasmModuleParser.h:
+            * wasm/WasmPageCount.cpp: Copied from Source/JavaScriptCore/wasm/WasmBinding.h.
+            (JSC::Wasm::PageCount::dump): nice for debugging
+            * wasm/WasmPageCount.h:
+            * wasm/WasmPlan.cpp:
+            (JSC::Wasm::Plan::parseAndValidateModule):
+            (JSC::Wasm::Plan::run):
+            * wasm/WasmPlan.h:
+            (JSC::Wasm::Plan::takeWasmExitStubs):
+            * wasm/WasmSignature.cpp:
+            (JSC::Wasm::Signature::toString):
+            (JSC::Wasm::Signature::dump):
+            * wasm/WasmSignature.h:
+            * wasm/WasmValidate.cpp:
+            (JSC::Wasm::validateFunction):
+            * wasm/WasmValidate.h:
+            * wasm/js/JSWebAssemblyInstance.h:
+            (JSC::JSWebAssemblyInstance::offsetOfTable):
+            (JSC::JSWebAssemblyInstance::offsetOfImportFunctions):
+            (JSC::JSWebAssemblyInstance::offsetOfImportFunction):
+            * wasm/js/JSWebAssemblyMemory.cpp:
+            (JSC::JSWebAssemblyMemory::create):
+            (JSC::JSWebAssemblyMemory::JSWebAssemblyMemory):
+            (JSC::JSWebAssemblyMemory::buffer):
+            (JSC::JSWebAssemblyMemory::grow):
+            * wasm/js/JSWebAssemblyMemory.h:
+            (JSC::JSWebAssemblyMemory::memory):
+            (JSC::JSWebAssemblyMemory::offsetOfMemory):
+            (JSC::JSWebAssemblyMemory::offsetOfSize):
+            * wasm/js/JSWebAssemblyModule.cpp:
+            (JSC::JSWebAssemblyModule::create):
+            (JSC::JSWebAssemblyModule::JSWebAssemblyModule):
+            * wasm/js/JSWebAssemblyModule.h:
+            (JSC::JSWebAssemblyModule::signatureIndexFromFunctionIndexSpace):
+            (JSC::JSWebAssemblyModule::functionImportCount):
+            * wasm/js/WebAssemblyFunction.cpp:
+            (JSC::callWebAssemblyFunction):
+            (JSC::WebAssemblyFunction::create):
+            (JSC::WebAssemblyFunction::createStructure):
+            (JSC::WebAssemblyFunction::WebAssemblyFunction):
+            (JSC::WebAssemblyFunction::finishCreation):
+            * wasm/js/WebAssemblyFunction.h:
+            (JSC::WebAssemblyFunction::wasmEntrypoint):
+            (JSC::WebAssemblyFunction::offsetOfInstance):
+            (JSC::WebAssemblyFunction::offsetOfWasmEntryPointCode):
+            * wasm/js/WebAssemblyInstanceConstructor.cpp:
+            (JSC::constructJSWebAssemblyInstance): always start with a dummy
+            memory, so wasm->wasm calls don't need to null-check
+            * wasm/js/WebAssemblyMemoryConstructor.cpp:
+            (JSC::constructJSWebAssemblyMemory):
+            * wasm/js/WebAssemblyModuleConstructor.cpp:
+            (JSC::WebAssemblyModuleConstructor::createModule):
+            * wasm/js/WebAssemblyModuleRecord.cpp:
+            (JSC::WebAssemblyModuleRecord::link):
+            (JSC::WebAssemblyModuleRecord::evaluate):
+            * wasm/js/WebAssemblyModuleRecord.h:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210202. rdar://problem/29803676
+
+    2016-12-28  Saam Barati  
+
+            Unreviewed. Fix jsc.cpp build error.
+
+            * jsc.cpp:
+            (functionTestWasmModuleFunctions):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210201. rdar://problem/29803676
+
+    2016-12-28  Saam Barati  
+
+            WebAssembly: Implement grow_memory and current_memory
+            https://bugs.webkit.org/show_bug.cgi?id=166448
+            
+
+            Reviewed by Keith Miller.
+
+            This patch implements grow_memory, current_memory, and WebAssembly.prototype.grow.
+            See relevant spec texts here:
+
+            https://github.com/WebAssembly/design/blob/master/Semantics.md#linear-memory-accesses
+            https://github.com/WebAssembly/design/blob/master/JS.md#webassemblymemoryprototypegrow
+
+            I also fix a couple miscellaneous bugs:
+
+            1. Data section now understands full init_exprs.
+            2. parseVarUInt1 no longer has a bug where we allow values larger than 1 if
+            their bottom 8 bits are zero.
+
+            Since the JS API can now grow memory, we need to make calling an import
+            and call_indirect refresh the base memory register and the size registers.
+
+            * jsc.cpp:
+            (functionTestWasmModuleFunctions):
+            * runtime/Options.h:
+            * runtime/VM.h:
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+            (JSC::Wasm::reloadPinnedRegisters):
+            (JSC::Wasm::B3IRGenerator::emitReloadPinnedRegisters):
+            (JSC::Wasm::createJSToWasmWrapper):
+            (JSC::Wasm::parseAndCompile):
+            * wasm/WasmFormat.cpp:
+            (JSC::Wasm::Segment::create):
+            * wasm/WasmFormat.h:
+            (JSC::Wasm::I32InitExpr::I32InitExpr):
+            (JSC::Wasm::I32InitExpr::globalImport):
+            (JSC::Wasm::I32InitExpr::constValue):
+            (JSC::Wasm::I32InitExpr::isConst):
+            (JSC::Wasm::I32InitExpr::isGlobalImport):
+            (JSC::Wasm::I32InitExpr::globalImportIndex):
+            (JSC::Wasm::Segment::byte):
+            (JSC::Wasm::ModuleInformation::importFunctionCount):
+            (JSC::Wasm::ModuleInformation::hasMemory):
+            * wasm/WasmFunctionParser.h:
+            * wasm/WasmMemory.cpp:
+            (JSC::Wasm::Memory::Memory):
+            (JSC::Wasm::Memory::grow):
+            * wasm/WasmMemory.h:
+            (JSC::Wasm::Memory::size):
+            (JSC::Wasm::Memory::sizeInPages):
+            (JSC::Wasm::Memory::offsetOfMemory):
+            (JSC::Wasm::Memory::isValid): Deleted.
+            (JSC::Wasm::Memory::grow): Deleted.
+            * wasm/WasmModuleParser.cpp:
+            (JSC::Wasm::makeI32InitExpr):
+            * wasm/WasmModuleParser.h:
+            * wasm/WasmPageCount.h:
+            (JSC::Wasm::PageCount::bytes):
+            (JSC::Wasm::PageCount::pageCount):
+            (JSC::Wasm::PageCount::fromBytes):
+            (JSC::Wasm::PageCount::operator+):
+            * wasm/WasmParser.h:
+            (JSC::Wasm::Parser::parseVarUInt1):
+            * wasm/WasmValidate.cpp:
+            * wasm/js/JSWebAssemblyInstance.h:
+            (JSC::JSWebAssemblyInstance::offsetOfMemory):
+            * wasm/js/JSWebAssemblyMemory.cpp:
+            (JSC::JSWebAssemblyMemory::~JSWebAssemblyMemory):
+            (JSC::JSWebAssemblyMemory::grow):
+            * wasm/js/JSWebAssemblyMemory.h:
+            (JSC::JSWebAssemblyMemory::offsetOfMemory):
+            * wasm/js/JSWebAssemblyModule.h:
+            (JSC::JSWebAssemblyModule::functionImportCount):
+            (JSC::JSWebAssemblyModule::jsEntrypointCalleeFromFunctionIndexSpace):
+            (JSC::JSWebAssemblyModule::wasmEntrypointCalleeFromFunctionIndexSpace):
+            (JSC::JSWebAssemblyModule::importCount): Deleted.
+            * wasm/js/WebAssemblyFunction.cpp:
+            (JSC::callWebAssemblyFunction):
+            * wasm/js/WebAssemblyInstanceConstructor.cpp:
+            (JSC::constructJSWebAssemblyInstance):
+            * wasm/js/WebAssemblyMemoryConstructor.cpp:
+            (JSC::constructJSWebAssemblyMemory):
+            * wasm/js/WebAssemblyMemoryPrototype.cpp:
+            (JSC::getMemory):
+            (JSC::webAssemblyMemoryProtoFuncBuffer):
+            (JSC::webAssemblyMemoryProtoFuncGrow):
+            * wasm/js/WebAssemblyModuleRecord.cpp:
+            (JSC::WebAssemblyModuleRecord::link):
+            (JSC::dataSegmentFail):
+            (JSC::WebAssemblyModuleRecord::evaluate):
+            * wasm/wasm.json:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210073. rdar://problem/29762017
+
+    2016-12-21  JF Bastien  
+
+            WebAssembly JS API: cleanup & pass VM around to {Compile/Runtime}Error
+            https://bugs.webkit.org/show_bug.cgi?id=166295
+            
+
+            Reviewed by Mark Lam.
+
+            Rename the create* functions, and pass VM around, as suggested for
+            LinkError in #165805.
+
+            At the same time, use the default source appender when
+            constructing these error types, which gives a nice map back to the
+            original source as part of the error message. This is clearer when
+            using the current frame, so add that as well.
+
+            * jit/ThunkGenerators.cpp:
+            (JSC::throwExceptionFromWasmThunkGenerator):
+            * wasm/js/JSWebAssemblyCompileError.cpp:
+            (JSC::JSWebAssemblyCompileError::create):
+            (JSC::createJSWebAssemblyCompileError):
+            (JSC::createWebAssemblyCompileError): Deleted.
+            * wasm/js/JSWebAssemblyCompileError.h:
+            (JSC::JSWebAssemblyCompileError::create):
+            * wasm/js/JSWebAssemblyRuntimeError.cpp:
+            (JSC::JSWebAssemblyRuntimeError::create):
+            * wasm/js/JSWebAssemblyRuntimeError.h:
+            (JSC::JSWebAssemblyRuntimeError::create):
+            * wasm/js/WebAssemblyCompileErrorConstructor.cpp:
+            (JSC::constructJSWebAssemblyCompileError):
+            * wasm/js/WebAssemblyModuleConstructor.cpp:
+            (JSC::WebAssemblyModuleConstructor::createModule):
+            * wasm/js/WebAssemblyRuntimeErrorConstructor.cpp:
+            (JSC::constructJSWebAssemblyRuntimeError):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210244. rdar://problem/29844107
+
+    2017-01-03  JF Bastien  
+
+            WebAssembly JS API: check and test in-call / out-call values
+            https://bugs.webkit.org/show_bug.cgi?id=164876
+            
+
+            Reviewed by Saam Barati.
+
+            * wasm/WasmBinding.cpp:
+            (JSC::Wasm::wasmToJs): fix the wasm -> JS call coercions for f32 /
+            f64 which the associated tests inadvertently tripped on: the
+            previous code wasn't correctly performing JSValue boxing for
+            "double" values. This change is slightly involved because it
+            requires two scratch registers to materialize the
+            `DoubleEncodeOffset` value. This change therefore reorganizes the
+            code to first generate traps, then handle all integers (freeing
+            all GPRs), and then all the floating-point values.
+            * wasm/js/WebAssemblyFunction.cpp:
+            (JSC::callWebAssemblyFunction): Implement the defined semantics
+            for mismatched arities when JS calls wasm:
+            https://github.com/WebAssembly/design/blob/master/JS.md#exported-function-exotic-objects
+              - i32 is 0, f32 / f64 are NaN.
+              - wasm functions which return "void" are "undefined" in JS.
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210228. rdar://problem/29841541
+
+    2017-01-02  Saam Barati  
+
+            WebAssembly: Some loads don't take into account the offset
+            https://bugs.webkit.org/show_bug.cgi?id=166616
+            
+
+            Reviewed by Keith Miller.
+
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::B3IRGenerator::emitLoadOp):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210203. rdar://problem/29815000
+
+    2016-12-28  Saam Barati  
+
+            WebAssembly: Don't allow duplicate export names
+            https://bugs.webkit.org/show_bug.cgi?id=166490
+            
+
+            Reviewed by Keith Miller.
+
+            * wasm/WasmModuleParser.cpp:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210137. rdar://problem/29760386
+
+    2016-12-23  Keith Miller  
+
+            WebAssembly: trap on bad division.
+            https://bugs.webkit.org/show_bug.cgi?id=164786
+
+            Reviewed by Mark Lam.
+
+            This patch adds traps for division / modulo by zero and for
+            division by int_min / -1.
+
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::B3IRGenerator::emitChecksForModOrDiv):
+            * wasm/WasmExceptionType.h:
+            * wasm/WasmPlan.cpp:
+            (JSC::Wasm::Plan::run):
+            * wasm/wasm.json:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210127. rdar://problem/29795709
+
+    2016-12-22  Keith Miller  
+
+            WebAssembly: Make spec-tests/f32.wast.js and spec-tests/f64.wast.js pass
+            https://bugs.webkit.org/show_bug.cgi?id=166447
+
+            Reviewed by Saam Barati.
+
+            We needed to treat -0.0 < 0.0 for floating point min/max. For min,
+            the algorithm works because if a == b then a and b are not NaNs, so
+            either they are the same value or they are some mix of zeros. When we
+            OR a and b, we either get the same number back or we get -0.0.
+            Similarly, for max we use an AND: the sign bit gets dropped if one is
+            0.0 and the other is -0.0; otherwise, we get the same number back.
+
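+            A standalone sketch of the bit trick in plain C++ (illustrative only, not
+            the generated B3/wasm code):
+
+            ```
+            #include <cstdint>
+            #include <cstring>
+
+            static uint64_t bitsOf(double value) { uint64_t bits; std::memcpy(&bits, &value, sizeof(bits)); return bits; }
+            static double doubleFromBits(uint64_t bits) { double value; std::memcpy(&value, &bits, sizeof(value)); return value; }
+
+            // When a == b the operands are not NaN, so they are either identical or a
+            // mix of +0.0 and -0.0: OR keeps the sign bit (min prefers -0.0), while
+            // AND drops it (max prefers +0.0).
+            double wasmMin(double a, double b)
+            {
+                if (a < b) return a;
+                if (b < a) return b;
+                if (a != a) return a; // NaN propagates
+                if (b != b) return b;
+                return doubleFromBits(bitsOf(a) | bitsOf(b));
+            }
+
+            double wasmMax(double a, double b)
+            {
+                if (a > b) return a;
+                if (b > a) return b;
+                if (a != a) return a;
+                if (b != b) return b;
+                return doubleFromBits(bitsOf(a) & bitsOf(b));
+            }
+            ```
+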
+            * wasm/wasm.json:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210126. rdar://problem/29793949
+
+    2016-12-22  Saam Barati  
+
+            WebAssembly: Make calling Wasm functions that returns or takes an i64 as a parameter an early exception
+            https://bugs.webkit.org/show_bug.cgi?id=166437
+            
+
+            Reviewed by Keith Miller.
+
+            This patch makes it so that we throw an exception before we do
+            anything else if we call a wasm function that either takes an
+            i64 as an argument or returns an i64.
+
+            * wasm/js/WebAssemblyFunction.cpp:
+            (JSC::callWebAssemblyFunction):
+            (JSC::WebAssemblyFunction::WebAssemblyFunction):
+            (JSC::WebAssemblyFunction::call): Deleted.
+            * wasm/js/WebAssemblyFunction.h:
+            (JSC::WebAssemblyFunction::signatureIndex):
+            (JSC::WebAssemblyFunction::jsEntrypoint):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210115. rdar://problem/29793220
+
+    2016-12-22  Saam Barati  
+
+            WebAssembly: Make the spec-tests/address.wast.js test pass
+            https://bugs.webkit.org/show_bug.cgi?id=166429
+            
+
+            Reviewed by Keith Miller.
+
+            Right now, provably out of bound loads/stores (given a load/store's constant
+            offset) are not a validation error. However, we were failing to catch uint32_t
+            overflows in release builds (we did have a debug assertion). To fix this,
+            I now detect when uint32_t addition will overflow, and instead of emitting
+            a normal load/store, I emit code that throws an out of bounds memory exception.
+
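+            A tiny sketch of the kind of overflow check involved (illustrative only;
+            not the actual B3IRGenerator code):
+
+            ```
+            #include <cstdint>
+            #include <limits>
+
+            // Illustrative only: if adding the constant offset would overflow uint32_t,
+            // the access is provably out of bounds, so code that throws the
+            // out-of-bounds exception is emitted instead of a load/store.
+            bool additionOverflowsUInt32(uint32_t a, uint32_t b)
+            {
+                return a > std::numeric_limits<uint32_t>::max() - b;
+            }
+            ```
+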
+            * wasm/WasmB3IRGenerator.cpp:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210111. rdar://problem/29791695
+
+    2016-12-22  Keith Miller  
+
+            WebAssembly: The validator should not allow unused stack entries at the end of a block
+            https://bugs.webkit.org/show_bug.cgi?id=166411
+
+            Reviewed by Saam Barati.
+
+            This patch also cleans up some of the verbose mode logging.
+
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::dumpExpressionStack):
+            (JSC::Wasm::B3IRGenerator::dump):
+            * wasm/WasmFunctionParser.h:
+            * wasm/WasmValidate.cpp:
+            (JSC::Wasm::dumpExpressionStack):
+            (JSC::Wasm::Validate::dump):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210102. rdar://problem/29784532
+
+    2016-12-22  Saam Barati  
+
+            WebAssembly: Make the spec-tests/start.wast.js test pass
+            https://bugs.webkit.org/show_bug.cgi?id=166416
+            
+
+            Reviewed by Yusuke Suzuki.
+
+            To make the test run, I had to fix two bugs:
+
+            1. We weren't properly finding the start function. There was code
+            that would try to find the start function from the list of *exported*
+            functions. This is wrong; the start function is an index into the
+            function index space, which is the space for *imports* and *local*
+            functions. So the code was just wrong in this respect, and I've
+            fixed it to do the right thing. We weren't sure if this was originally
+            allowed or not in the spec, but it has been decided that it is allowed
+            and the spec-tests test for it: https://github.com/WebAssembly/design/issues/896
+
+            2. We were emitting a breakpoint for Unreachable. Instead of crashing,
+            this opcode needs to throw an exception when executing.
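+
+            A minimal standalone model of the function index space described in (1)
+            (names and types are illustrative, not the real data structures):
+
+                #include <cstdint>
+                #include <vector>
+
+                struct FunctionIndexSpace {
+                    std::vector<uint32_t> importedFunctions; // indices [0, imports) name imports
+                    std::vector<uint32_t> localFunctions;    // the rest name module-local functions
+
+                    uint32_t lookup(uint32_t functionIndex) const
+                    {
+                        if (functionIndex < importedFunctions.size())
+                            return importedFunctions[functionIndex];
+                        return localFunctions[functionIndex - importedFunctions.size()];
+                    }
+                };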
+
+            * wasm/WasmB3IRGenerator.cpp:
+            * wasm/WasmExceptionType.h:
+            * wasm/js/WebAssemblyModuleRecord.cpp:
+            (JSC::WebAssemblyModuleRecord::link):
+            (JSC::WebAssemblyModuleRecord::evaluate):
+            * wasm/js/WebAssemblyModuleRecord.h:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210091. rdar://problem/29782833
+
+    2016-12-21  Keith Miller  
+
+            WebAssembly: Fix decode floating point constants in unreachable code
+            https://bugs.webkit.org/show_bug.cgi?id=166400
+
+            Reviewed by Saam Barati.
+
+            We decoded these as variable length but they should be fixed length.
+
+            * wasm/WasmFunctionParser.h:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210090. rdar://problem/29782821
+
+    2016-12-21  Keith Miller  
+
+            WebAssembly: Allow br, br_if, and br_table to act as a return
+            https://bugs.webkit.org/show_bug.cgi?id=166393
+
+            Reviewed by Saam Barati.
+
+            This patch allows br, br_if, and br_table to treat a branch whose
+            target is the size of the control stack as a return. This change was
+            made by adding a new block type, TopLevel, to the wasm function
+            parser. Adding this new block eliminates a lot of the special-case
+            code we had in the parser previously. The only special case we still
+            need is when the end opcode is parsed at the top level; the B3 IR
+            generator needs to automatically emit a return at that point.
+
+            Also, this patch adds the function number to validation errors
+            in the function parser. The current error message is not helpful
+            otherwise.
+
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::B3IRGenerator::ControlData::dump):
+            (JSC::Wasm::B3IRGenerator::addTopLevel):
+            * wasm/WasmFunctionParser.h:
+            * wasm/WasmPlan.cpp:
+            (JSC::Wasm::Plan::parseAndValidateModule):
+            (JSC::Wasm::Plan::run):
+            * wasm/WasmValidate.cpp:
+            (JSC::Wasm::Validate::ControlData::dump):
+            (JSC::Wasm::Validate::Validate):
+            (JSC::Wasm::Validate::addTopLevel):
+            (JSC::Wasm::validateFunction):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210047. rdar://problem/29758107
+
+    2016-12-20  Saam Barati  
+
+            WebAssembly: We should compile wasm functions in parallel
+            https://bugs.webkit.org/show_bug.cgi?id=165993
+
+            Reviewed by Keith Miller.
+
+            This patch adds a very simple parallel compiler for Wasm code.
+            It speeds up compiling the Unity headless benchmark by slightly
+            more than 4x on my MBP. To make this safe, I perform all linking
+            on the main thread. I also had to change some code inside
+            WasmB3IRGenerator to be thread safe.
+
+            * b3/air/AirCustom.h:
+            (JSC::B3::Air::WasmBoundsCheckCustom::generate):
+            * b3/air/AirGenerationContext.h:
+            * wasm/WasmB3IRGenerator.cpp:
+            (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+            (JSC::Wasm::B3IRGenerator::emitExceptionCheck):
+            (JSC::Wasm::createJSToWasmWrapper):
+            (JSC::Wasm::parseAndCompile):
+            * wasm/WasmB3IRGenerator.h:
+            * wasm/WasmCallingConvention.h:
+            (JSC::Wasm::CallingConvention::setupFrameInPrologue):
+            * wasm/WasmPlan.cpp:
+            (JSC::Wasm::Plan::parseAndValidateModule):
+            (JSC::Wasm::Plan::run):
+            * wasm/WasmPlan.h:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210038. rdar://problem/29759741
+
+    2016-12-20  JF Bastien  
+
+            WebAssembly: construct 32-bit encodedJSValue properly
+            https://bugs.webkit.org/show_bug.cgi?id=166199
+
+            Reviewed by Mark Lam.
+
+            Constructing an EncodedJSValue using `{ }` yields the wrong value
+            on 32-bit platforms. WebAssembly doesn't currently target 32-bit
+            platforms, but we may as well get it right.
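+
+            A hedged illustration of the clearer pattern (the exact call sites vary; the
+            point is to encode the JSValue you actually want rather than rely on zeroed
+            bits, whose meaning differs between the 32-bit and 64-bit value encodings):
+
+                EncodedJSValue result = JSValue::encode(JSValue()); // rather than: EncodedJSValue result { };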
+
+            * wasm/JSWebAssembly.cpp:
+            (JSC::webAssemblyCompileFunc):
+            (JSC::webAssemblyValidateFunc):
+            * wasm/js/JSWebAssemblyHelpers.h:
+            (JSC::toNonWrappingUint32):
+            * wasm/js/WebAssemblyCompileErrorConstructor.cpp:
+            (JSC::constructJSWebAssemblyCompileError):
+            * wasm/js/WebAssemblyFunction.cpp:
+            (JSC::callWebAssemblyFunction):
+            * wasm/js/WebAssemblyInstanceConstructor.cpp:
+            (JSC::constructJSWebAssemblyInstance):
+            * wasm/js/WebAssemblyMemoryConstructor.cpp:
+            (JSC::constructJSWebAssemblyMemory):
+            * wasm/js/WebAssemblyModuleConstructor.cpp:
+            (JSC::constructJSWebAssemblyModule):
+            * wasm/js/WebAssemblyRuntimeErrorConstructor.cpp:
+            (JSC::constructJSWebAssemblyRuntimeError):
+            * wasm/js/WebAssemblyTableConstructor.cpp:
+            (JSC::constructJSWebAssemblyTable):
+            * wasm/js/WebAssemblyTablePrototype.cpp:
+            (JSC::webAssemblyTableProtoFuncLength):
+            (JSC::webAssemblyTableProtoFuncGrow):
+            (JSC::webAssemblyTableProtoFuncGet):
+            (JSC::webAssemblyTableProtoFuncSet):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210026. rdar://problem/29735737
+
+    2016-12-20  JF Bastien  
+
+            WebAssembly: unique function signatures
+            https://bugs.webkit.org/show_bug.cgi?id=165957
+            
+
+            Reviewed by Saam Barati.
+
+            Signatures in a Module's Type section can be duplicated, so we
+            need to unique them so that call_indirect only has to do a single
+            integer compare to check that a callee's Signature is the same as
+            the Signature declared at the call site. Without uniquing we'd
+            either trap when duplicate Signatures are used, or we'd need to do
+            multiple comparisons. This patch makes that narrow use case work
+            correctly.
+
+            There's a further complication when calling from wasm to
+            wasm, in which case the Signatures must also match. Such
+            cross-instance calls will be improved in bug #165282, but this
+            patch sets the groundwork for it:
+
+            - Signatures are now owned by SignatureInformation which lives on
+              VM, and is shared by all Modules.
+            - When parsing a Module, a Signature is created for every Type
+              entry, and then uniqued by SignatureInformation's adopt
+              method. Duplicate Signatures are dropped and the previous
+              SignatureIndex is returned, new Signatures are adopted and a new
+              SignatureIndex is created.
+            - The SignatureIndex values are monotonic. 0 is used to represent
+              invalid indices, which trap. This can only occur through Table.
+            - SignatureInformation is used while generating code to map a
+              SignatureIndex back to the Signature* when return / argument
+              information is needed. This is a simple lookup into a Vector. It
+              isn't used at runtime.
+            - These Signatures live forever on VM because the bookkeeping
+              likely isn't worth it. We may want to empty things out if all
+              Modules die, this is tracked in bug #166037.
+            - We can further improve things by bit-packing SignatureIndex with
+              Code*, which is tracked by bug #165511.
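+
+            A standalone model of the uniquing step (types and names are illustrative
+            only, not the real SignatureInformation API):
+
+                #include <cstdint>
+                #include <map>
+                #include <utility>
+                #include <vector>
+
+                using Type = uint8_t;
+                using SignatureIndex = uint32_t;
+
+                struct SignatureUniquer {
+                    std::map<std::pair<std::vector<Type>, Type>, SignatureIndex> indices;
+                    SignatureIndex nextIndex { 1 }; // 0 is reserved for the invalid index that traps
+
+                    SignatureIndex adopt(std::vector<Type> arguments, Type returnType)
+                    {
+                        auto inserted = indices.emplace(std::make_pair(std::move(arguments), returnType), nextIndex);
+                        if (inserted.second)
+                            return nextIndex++;        // new signature: hand out a fresh, monotonic index
+                        return inserted.first->second; // duplicate: reuse the previously adopted index
+                    }
+                };
+
+            call_indirect can then validate a call site with a single SignatureIndex compare.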
+
+            * CMakeLists.txt:
+            * JavaScriptCore.xcodeproj/project.pbxproj:
+            * runtime/VM.h: wasm signatures are uniqued here, but aren't accessed frequently (only during parsing) so indirection is fine
+            * wasm/WasmB3IRGenerator.cpp: use SignatureIndex instead of Signature* when appropriate, and when still using Signature* do so with its new API
+            (JSC::Wasm::createJSToWasmWrapper):
+            (JSC::Wasm::parseAndCompile):
+            * wasm/WasmBinding.cpp:
+            (JSC::Wasm::importStubGenerator): use SignatureIndex
+            * wasm/WasmBinding.h:
+            * wasm/WasmCallingConvention.h:
+            (JSC::Wasm::CallingConvention::loadArguments):
+            * wasm/WasmFormat.cpp: drive-by move of alloc/free functions to the implementation file; this allows the .h file to drop a FastMalloc.h include
+            (JSC::Wasm::Segment::create):
+            (JSC::Wasm::Segment::destroy):
+            (JSC::Wasm::Segment::createPtr):
+            * wasm/WasmFormat.h: move Signature to its own file
+            (JSC::Wasm::CallableFunction::CallableFunction):
+            * wasm/WasmFunctionParser.h:
+            (JSC::Wasm::FunctionParser::FunctionParser):
+            * wasm/WasmModuleParser.cpp:
+            * wasm/WasmModuleParser.h:
+            (JSC::Wasm::ModuleParser::ModuleParser):
+            * wasm/WasmParser.h:
+            (JSC::Wasm::Parser::Parser):
+            * wasm/WasmPlan.cpp:
+            (JSC::Wasm::Plan::parseAndValidateModule):
+            (JSC::Wasm::Plan::run):
+            * wasm/WasmSignature.cpp: Added.
+            (JSC::Wasm::Signature::dump):
+            (JSC::Wasm::Signature::hash):
+            (JSC::Wasm::Signature::create):
+            (JSC::Wasm::Signature::createInvalid):
+            (JSC::Wasm::Signature::destroy):
+            (JSC::Wasm::SignatureInformation::~SignatureInformation):
+            (JSC::Wasm::SignatureInformation::adopt):
+            (JSC::Wasm::SignatureInformation::get):
+            * wasm/WasmSignature.h: Added.
+            (JSC::Wasm::Signature::Signature):
+            (JSC::Wasm::Signature::storage):
+            (JSC::Wasm::Signature::allocatedSize):
+            (JSC::Wasm::Signature::returnType):
+            (JSC::Wasm::Signature::returnCount):
+            (JSC::Wasm::Signature::argumentCount):
+            (JSC::Wasm::Signature::argument):
+            (JSC::Wasm::Signature::operator==):
+            (JSC::Wasm::SignatureHash::empty):
+            (JSC::Wasm::SignatureHash::deleted):
+            (JSC::Wasm::SignatureHash::SignatureHash):
+            (JSC::Wasm::SignatureHash::operator==):
+            (JSC::Wasm::SignatureHash::equal):
+            (JSC::Wasm::SignatureHash::hash):
+            (JSC::Wasm::SignatureHash::isHashTableDeletedValue):
+            * wasm/WasmValidate.cpp:
+            (JSC::Wasm::validateFunction):
+            * wasm/WasmValidate.h:
+            * wasm/js/JSWebAssemblyInstance.cpp:
+            (JSC::JSWebAssemblyInstance::create):
+            * wasm/js/JSWebAssemblyModule.h:
+            (JSC::JSWebAssemblyModule::signatureForFunctionIndexSpace):
+            * wasm/js/JSWebAssemblyTable.cpp:
+            (JSC::JSWebAssemblyTable::JSWebAssemblyTable):
+            (JSC::JSWebAssemblyTable::clearFunction):
+            (JSC::JSWebAssemblyTable::setFunction):
+            * wasm/js/WebAssemblyFunction.cpp:
+            (JSC::callWebAssemblyFunction):
+            (JSC::WebAssemblyFunction::call):
+            (JSC::WebAssemblyFunction::create):
+            (JSC::WebAssemblyFunction::WebAssemblyFunction):
+            (JSC::WebAssemblyFunction::finishCreation):
+            * wasm/js/WebAssemblyFunction.h:
+            (JSC::WebAssemblyFunction::signatureIndex):
+            * wasm/js/WebAssemblyModuleRecord.cpp:
+            (JSC::WebAssemblyModuleRecord::link):
+            (JSC::WebAssemblyModuleRecord::evaluate):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r209979. rdar://problem/29735737
+
+    2016-12-18  Saam Barati  
+
+            WebAssembly: Implement the WebAssembly.compile and WebAssembly.validate
+            https://bugs.webkit.org/show_bug.cgi?id=165936
+
+            Reviewed by Mark Lam.
+
+            The APIs are documented here:
+            - https://github.com/WebAssembly/design/blob/master/JS.md#webassemblycompile
+            - https://github.com/WebAssembly/design/blob/master/JS.md#webassemblyvalidate
+
+            * wasm/JSWebAssembly.cpp:
+            (JSC::webAssemblyCompileFunc):
+            (JSC::webAssemblyValidateFunc):
+            (JSC::JSWebAssembly::finishCreation):
+            * wasm/WasmPlan.cpp:
+            (JSC::Wasm::Plan::parseAndValidateModule):
+            (JSC::Wasm::Plan::run):
+            * wasm/WasmPlan.h:
+            * wasm/js/JSWebAssemblyHelpers.h:
+            (JSC::getWasmBufferFromValue):
+            * wasm/js/WebAssemblyModuleConstructor.cpp:
+            (JSC::constructJSWebAssemblyModule):
+            (JSC::callJSWebAssemblyModule):
+            (JSC::WebAssemblyModuleConstructor::createModule):
+            * wasm/js/WebAssemblyModuleConstructor.h:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210359. rdar://problem/29882478
+
+    2017-01-05  Per Arne Vollan  
+
+            [Win] Compile error.
+            https://bugs.webkit.org/show_bug.cgi?id=166726
+
+            Reviewed by Alex Christensen.
+
+            Add include folder.
+
+            * CMakeLists.txt:
+
+2017-01-05  Matthew Hanson  
+
+        Merge r210028. rdar://problem/29747874
+
+    2016-12-20  JF Bastien  
+
+            WebAssembly API: implement WebAssembly.LinkError
+            https://bugs.webkit.org/show_bug.cgi?id=165805
+            
+
+            Reviewed by Mark Lam.
+
+            As described here: https://github.com/WebAssembly/design/pull/901
+            Some TypeError and RangeError are now converted to WebAssembly.LinkError.
+
+            * CMakeLists.txt: add files
+            * DerivedSources.make: add autogenerated .lut.h files
+            * JavaScriptCore.xcodeproj/project.pbxproj: add files
+            * builtins/BuiltinNames.h: new name LinkError
+            * runtime/JSGlobalObject.h: auto-register LinkError using existing macro magic
+            * wasm/JSWebAssembly.h: make the new includes available
+            * wasm/js/JSWebAssemblyLinkError.cpp: Copied from Source/JavaScriptCore/wasm/JSWebAssemblyCompileError.cpp.
+            (JSC::JSWebAssemblyLinkError::create):
+            (JSC::JSWebAssemblyLinkError::JSWebAssemblyLinkError):
+            (JSC::createWebAssemblyLinkError):
+            * wasm/js/JSWebAssemblyLinkError.h: Copied from Source/JavaScriptCore/wasm/JSWebAssemblyCompileError.h.
+            (JSC::JSWebAssemblyLinkError::create):
+            * wasm/js/WebAssemblyInstanceConstructor.cpp: update as per spec change
+            (JSC::constructJSWebAssemblyInstance):
+            * wasm/js/WebAssemblyLinkErrorConstructor.cpp: Copied from Source/JavaScriptCore/wasm/WebAssemblyCompileErrorConstructor.cpp.
+            (JSC::constructJSWebAssemblyLinkError):
+            (JSC::callJSWebAssemblyLinkError):
+            (JSC::WebAssemblyLinkErrorConstructor::create):
+            (JSC::WebAssemblyLinkErrorConstructor::createStructure):
+            (JSC::WebAssemblyLinkErrorConstructor::finishCreation):
+            (JSC::WebAssemblyLinkErrorConstructor::WebAssemblyLinkErrorConstructor):
+            (JSC::WebAssemblyLinkErrorConstructor::getConstructData):
+            (JSC::WebAssemblyLinkErrorConstructor::getCallData):
+            * wasm/js/WebAssemblyLinkErrorConstructor.h: Copied from Source/JavaScriptCore/wasm/WebAssemblyCompileErrorConstructor.h.
+            * wasm/js/WebAssemblyLinkErrorPrototype.cpp: Copied from Source/JavaScriptCore/wasm/WebAssemblyCompileErrorPrototype.cpp.
+            (JSC::WebAssemblyLinkErrorPrototype::create):
+            (JSC::WebAssemblyLinkErrorPrototype::createStructure):
+            (JSC::WebAssemblyLinkErrorPrototype::finishCreation):
+            (JSC::WebAssemblyLinkErrorPrototype::WebAssemblyLinkErrorPrototype):
+            * wasm/js/WebAssemblyLinkErrorPrototype.h: Copied from Source/JavaScriptCore/wasm/WebAssemblyCompileErrorPrototype.h.
+            * wasm/js/WebAssemblyModuleRecord.cpp: update as per spec change
+            (JSC::dataSegmentFail):
+            (JSC::WebAssemblyModuleRecord::evaluate):
+
+2017-01-05  Matthew Hanson  
+
+        Merge r209998. rdar://problem/29554366
+
+    2016-12-19  Joseph Pecoraro  
+
+            Web Inspector: Assertion seen in InspectorDebuggerAgent::refAsyncCallData with Inspector open
+            https://bugs.webkit.org/show_bug.cgi?id=166034
+            
+
+            Reviewed by Brian Burg.
+
+            * inspector/agents/InspectorDebuggerAgent.cpp:
+            (Inspector::InspectorDebuggerAgent::refAsyncCallData):
+            Remove the assertion. This assert can fire if the currently executing callback
+            was just explicitly cancelled by script. Existing code already handles the case
+            where no async data is found for the given identifier.
+
+2016-12-19  Babak Shafiei  
+
+        Merge r210010.
+
+    2016-12-19  Mark Lam  
+
+            Rolling out r209974 and r209952. They break some websites in mysterious ways. Step 2: Rollout r209952.
+            https://bugs.webkit.org/show_bug.cgi?id=166049
+
+            Not reviewed.
+
+            * bytecode/HandlerInfo.h:
+            (JSC::HandlerInfoBase::typeName):
+            * bytecompiler/BytecodeGenerator.cpp:
+            (JSC::BytecodeGenerator::generate):
+            (JSC::BytecodeGenerator::BytecodeGenerator):
+            (JSC::BytecodeGenerator::emitReturn):
+            (JSC::BytecodeGenerator::pushFinallyControlFlowScope):
+            (JSC::BytecodeGenerator::pushIteratorCloseControlFlowScope):
+            (JSC::BytecodeGenerator::popFinallyControlFlowScope):
+            (JSC::BytecodeGenerator::popIteratorCloseControlFlowScope):
+            (JSC::BytecodeGenerator::emitComplexPopScopes):
+            (JSC::BytecodeGenerator::emitPopScopes):
+            (JSC::BytecodeGenerator::pushTry):
+            (JSC::BytecodeGenerator::popTryAndEmitCatch):
+            (JSC::BytecodeGenerator::labelScopeDepth):
+            (JSC::BytecodeGenerator::pushLocalControlFlowScope):
+            (JSC::BytecodeGenerator::popLocalControlFlowScope):
+            (JSC::BytecodeGenerator::emitEnumeration):
+            (JSC::BytecodeGenerator::emitYield):
+            (JSC::BytecodeGenerator::emitDelegateYield):
+            (JSC::BytecodeGenerator::popTry): Deleted.
+            (JSC::BytecodeGenerator::emitCatch): Deleted.
+            (JSC::BytecodeGenerator::restoreScopeRegister): Deleted.
+            (JSC::BytecodeGenerator::labelScopeDepthToLexicalScopeIndex): Deleted.
+            (JSC::BytecodeGenerator::emitIsNumber): Deleted.
+            (JSC::BytecodeGenerator::emitJumpViaFinallyIfNeeded): Deleted.
+            (JSC::BytecodeGenerator::emitReturnViaFinallyIfNeeded): Deleted.
+            (JSC::BytecodeGenerator::emitFinallyCompletion): Deleted.
+            (JSC::BytecodeGenerator::allocateFinallyRegisters): Deleted.
+            (JSC::BytecodeGenerator::releaseFinallyRegisters): Deleted.
+            (JSC::BytecodeGenerator::emitCompareFinallyActionAndJumpIf): Deleted.
+            * bytecompiler/BytecodeGenerator.h:
+            (JSC::BytecodeGenerator::isInFinallyBlock):
+            (JSC::FinallyJump::FinallyJump): Deleted.
+            (JSC::FinallyContext::FinallyContext): Deleted.
+            (JSC::FinallyContext::outerContext): Deleted.
+            (JSC::FinallyContext::finallyLabel): Deleted.
+            (JSC::FinallyContext::depth): Deleted.
+            (JSC::FinallyContext::numberOfBreaksOrContinues): Deleted.
+            (JSC::FinallyContext::incNumberOfBreaksOrContinues): Deleted.
+            (JSC::FinallyContext::handlesReturns): Deleted.
+            (JSC::FinallyContext::setHandlesReturns): Deleted.
+            (JSC::FinallyContext::registerJump): Deleted.
+            (JSC::FinallyContext::numberOfJumps): Deleted.
+            (JSC::FinallyContext::jumps): Deleted.
+            (JSC::ControlFlowScope::ControlFlowScope): Deleted.
+            (JSC::ControlFlowScope::isLabelScope): Deleted.
+            (JSC::ControlFlowScope::isFinallyScope): Deleted.
+            (JSC::BytecodeGenerator::currentLexicalScopeIndex): Deleted.
+            (JSC::BytecodeGenerator::FinallyRegistersScope::FinallyRegistersScope): Deleted.
+            (JSC::BytecodeGenerator::FinallyRegistersScope::~FinallyRegistersScope): Deleted.
+            (JSC::BytecodeGenerator::finallyActionRegister): Deleted.
+            (JSC::BytecodeGenerator::finallyReturnValueRegister): Deleted.
+            (JSC::BytecodeGenerator::emitSetFinallyActionToNormalCompletion): Deleted.
+            (JSC::BytecodeGenerator::emitSetFinallyActionToReturnCompletion): Deleted.
+            (JSC::BytecodeGenerator::emitSetFinallyActionToJumpID): Deleted.
+            (JSC::BytecodeGenerator::emitSetFinallyReturnValueRegister): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNormalCompletion): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotJump): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsReturnCompletion): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotReturnCompletion): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotThrowCompletion): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfCompletionTypeIsThrow): Deleted.
+            (JSC::BytecodeGenerator::bytecodeOffsetToJumpID): Deleted.
+            * bytecompiler/NodesCodegen.cpp:
+            (JSC::ContinueNode::emitBytecode):
+            (JSC::BreakNode::emitBytecode):
+            (JSC::ReturnNode::emitBytecode):
+            (JSC::TryNode::emitBytecode):
+
+2016-12-19  Babak Shafiei  
+
+        Merge r210007.
+
+    2016-12-19  Mark Lam  
+
+            Rolling out r209974 and r209952. They break some websites in mysterious ways. Step 1: Rollout r209974.
+            https://bugs.webkit.org/show_bug.cgi?id=166049
+
+            Not reviewed.
+
+            * bytecompiler/BytecodeGenerator.cpp:
+            (JSC::BytecodeGenerator::emitEnumeration):
+            (JSC::BytecodeGenerator::emitJumpViaFinallyIfNeeded):
+            (JSC::BytecodeGenerator::emitReturnViaFinallyIfNeeded):
+            (JSC::BytecodeGenerator::emitFinallyCompletion):
+            (JSC::BytecodeGenerator::allocateFinallyRegisters):
+            (JSC::BytecodeGenerator::releaseFinallyRegisters):
+            (JSC::BytecodeGenerator::emitCompareFinallyActionAndJumpIf):
+            (JSC::BytecodeGenerator::allocateCompletionRecordRegisters): Deleted.
+            (JSC::BytecodeGenerator::releaseCompletionRecordRegisters): Deleted.
+            (JSC::BytecodeGenerator::emitJumpIfCompletionType): Deleted.
+            * bytecompiler/BytecodeGenerator.h:
+            (JSC::FinallyJump::FinallyJump):
+            (JSC::FinallyContext::registerJump):
+            (JSC::BytecodeGenerator::FinallyRegistersScope::FinallyRegistersScope):
+            (JSC::BytecodeGenerator::FinallyRegistersScope::~FinallyRegistersScope):
+            (JSC::BytecodeGenerator::finallyActionRegister):
+            (JSC::BytecodeGenerator::finallyReturnValueRegister):
+            (JSC::BytecodeGenerator::emitSetFinallyActionToNormalCompletion):
+            (JSC::BytecodeGenerator::emitSetFinallyActionToReturnCompletion):
+            (JSC::BytecodeGenerator::emitSetFinallyActionToJumpID):
+            (JSC::BytecodeGenerator::emitSetFinallyReturnValueRegister):
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNormalCompletion):
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotJump):
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsReturnCompletion):
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotReturnCompletion):
+            (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotThrowCompletion):
+            (JSC::BytecodeGenerator::emitJumpIfCompletionTypeIsThrow):
+            (JSC::BytecodeGenerator::bytecodeOffsetToJumpID):
+            (JSC::bytecodeOffsetToJumpID): Deleted.
+            (JSC::BytecodeGenerator::CompletionRecordScope::CompletionRecordScope): Deleted.
+            (JSC::BytecodeGenerator::CompletionRecordScope::~CompletionRecordScope): Deleted.
+            (JSC::BytecodeGenerator::completionTypeRegister): Deleted.
+            (JSC::BytecodeGenerator::completionValueRegister): Deleted.
+            (JSC::BytecodeGenerator::emitSetCompletionType): Deleted.
+            (JSC::BytecodeGenerator::emitSetCompletionValue): Deleted.
+            * bytecompiler/NodesCodegen.cpp:
+            (JSC::TryNode::emitBytecode):
+
+2016-12-19  Dean Jackson  
+
+        Merge another patch for rdar://problem/29466493.
+
+    2016-12-19  Dean Jackson  
+
+            Disable some features on the safari-603-branch.
+            
+
+            * Configurations/FeatureDefines.xcconfig:
+
+2016-12-19  Babak Shafiei  
+
+        Merge patch for rdar://problem/29466493.
+
+    2016-12-19  Dean Jackson  
+
+            Disable some features on the safari-603-branch.
+            
+
+            - Force the default state of experimental features to off.
+            - Move some experimental features that should be enabled on this
+              branch into the general feature list, so they can't be disabled.
+            - Disable some features that are not ready.
+
+            * Configurations/FeatureDefines.xcconfig:
+
+2016-12-18  Mark Lam  
+
+        Rename finallyActionRegister to completionTypeRegister and only store int JSValues in it.
+        https://bugs.webkit.org/show_bug.cgi?id=165979
+
+        Reviewed by Saam Barati.
+
+        This patch makes it so that we only store int JSValues in the finallyActionRegister,
+        thereby making type prediction on this register more successful for JITs.  In so
+        doing, we are able to get some additional benefits:
+
+        1. Renamed the following:
+           FinallyRegistersScope => CompletionRecordScope
+           finallyActionRegister => completionTypeRegister
+           finallyReturnValueRegister => completionValueRegister
+
+           These new names are more in line with the ES spec, which describes these
+           values as the completion record and its type and value properties.
+           https://tc39.github.io/ecma262/#sec-completion-record-specification-type
+
+        2. We now think of the Break and Continue jumpIDs as encodings of CompletionType
+           (in our implementation of completion type).  As a result, we only need one of
+           each of the emitter methods for getting, setting, and compare-and-jump on the
+           completion type.  The code using these methods also reads much more clearly now.
+
+        3. Finally blocks' op_catch should now always pop the caught Exception object into
+           the completionValueRegister instead of the completionTypeRegister (formerly
+           finallyActionRegister). 
+
+        Also removed the restoreScopeRegister() call in the IteratorClose catch block
+        because that is an implementation-specific synthesized catch block, and we
+        can guarantee that it never needs to resolve any symbols from the scope.  Hence,
+        there is no need to restore the scope register.
+
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::emitEnumeration):
+        (JSC::BytecodeGenerator::emitJumpViaFinallyIfNeeded):
+        (JSC::BytecodeGenerator::emitReturnViaFinallyIfNeeded):
+        (JSC::BytecodeGenerator::emitFinallyCompletion):
+        (JSC::BytecodeGenerator::allocateCompletionRecordRegisters):
+        (JSC::BytecodeGenerator::releaseCompletionRecordRegisters):
+        (JSC::BytecodeGenerator::emitJumpIfCompletionType):
+        (JSC::BytecodeGenerator::allocateFinallyRegisters): Deleted.
+        (JSC::BytecodeGenerator::releaseFinallyRegisters): Deleted.
+        (JSC::BytecodeGenerator::emitCompareFinallyActionAndJumpIf): Deleted.
+        * bytecompiler/BytecodeGenerator.h:
+        (JSC::bytecodeOffsetToJumpID):
+        (JSC::FinallyJump::FinallyJump):
+        (JSC::FinallyContext::registerJump):
+        (JSC::BytecodeGenerator::CompletionRecordScope::CompletionRecordScope):
+        (JSC::BytecodeGenerator::CompletionRecordScope::~CompletionRecordScope):
+        (JSC::BytecodeGenerator::completionTypeRegister):
+        (JSC::BytecodeGenerator::completionValueRegister):
+        (JSC::BytecodeGenerator::emitSetCompletionType):
+        (JSC::BytecodeGenerator::emitSetCompletionValue):
+        (JSC::BytecodeGenerator::FinallyRegistersScope::FinallyRegistersScope): Deleted.
+        (JSC::BytecodeGenerator::FinallyRegistersScope::~FinallyRegistersScope): Deleted.
+        (JSC::BytecodeGenerator::finallyActionRegister): Deleted.
+        (JSC::BytecodeGenerator::finallyReturnValueRegister): Deleted.
+        (JSC::BytecodeGenerator::emitSetFinallyActionToNormalCompletion): Deleted.
+        (JSC::BytecodeGenerator::emitSetFinallyActionToReturnCompletion): Deleted.
+        (JSC::BytecodeGenerator::emitSetFinallyActionToJumpID): Deleted.
+        (JSC::BytecodeGenerator::emitSetFinallyReturnValueRegister): Deleted.
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNormalCompletion): Deleted.
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotJump): Deleted.
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsReturnCompletion): Deleted.
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotReturnCompletion): Deleted.
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotThrowCompletion): Deleted.
+        (JSC::BytecodeGenerator::emitJumpIfCompletionTypeIsThrow): Deleted.
+        (JSC::BytecodeGenerator::bytecodeOffsetToJumpID): Deleted.
+        * bytecompiler/NodesCodegen.cpp:
+        (JSC::TryNode::emitBytecode):
+
+2016-12-17  Saam Barati  
+
+        WebAssembly: WasmB3IRGenerator uses WarmAny as a ValueRep but expects the incoming value to be a register
+        https://bugs.webkit.org/show_bug.cgi?id=165989
+
+        Reviewed by Mark Lam.
+
+        The input should be constrained to a register to match what
+        the patchpoint code expects.
+
+        * wasm/WasmB3IRGenerator.cpp:
+
+2016-12-17  Saam Barati  
+
+        WebAssembly: Change a RELEASE_ASSERT_NOT_REACHED to a jit.breakpoint() for now to allow us to run some wasm benchmarks
+        https://bugs.webkit.org/show_bug.cgi?id=165990
+
+        Reviewed by Mark Lam.
+
+        * wasm/WasmBinding.cpp:
+        (JSC::Wasm::importStubGenerator):
+
+2016-12-16  Joseph Pecoraro  
+
+        JSContext Inspector: Avoid some possible exceptions inspecting a JSContext
+        https://bugs.webkit.org/show_bug.cgi?id=165986
+        
+
+        Reviewed by Matt Baker.
+
+        * inspector/InjectedScriptSource.js:
+        (InjectedScript.prototype.processProperties):
+        Prefer String.prototype.endsWith now that it is available.
+
+        (InjectedScript.prototype._describe):
+        Prefer Function.prototype.toString for converting functions to String.
+        Previously we were doing String(f), which would do Symbol.toPrimitive
+        conversion, which seems unnecessary here.
+
+2016-12-16  Michael Catanzaro  
+
+        Unreviewed, fix GCC 6 build failure after r209952
+
+        Return false, not nullptr, in function returning bool.
+
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::emitJumpViaFinallyIfNeeded):
+
+2016-12-16  Saam Barati  
+
+        WebAssembly: We still have some incorrect parsing productions inside unreachable code
+        https://bugs.webkit.org/show_bug.cgi?id=165981
+
+        Reviewed by Keith Miller.
+
+        This hardens our parsing for CallIndirect and Loop/Block/If to be exactly like their reachable variants.
+        
+        It also fixes a more nefarious bug in which we were decoding an extra varuint32
+        for Br/BrIf inside unreachable code.
+
+        * wasm/WasmFunctionParser.h:
+
+2016-12-16  Filip Pizlo  
+
+        CellState should have members with accurate names
+        https://bugs.webkit.org/show_bug.cgi?id=165969
+
+        Reviewed by Mark Lam.
+        
+        This once again renames the members in CellState. I wanted to convey the following
+        pieces of information in the names:
+        
+        - What does the state mean for Generational GC?
+        - What does the state mean for Concurrent GC?
+        - Does the state guarantee what it means, or is there some contingency?
+        
+        The names I came up with are:
+        
+        PossiblyOldOrBlack: An object in this state may be old, or may be black, depending on
+            other things. If the mark bit is set then the object is either black or being
+            blackened as we speak. It's going to survive the GC, so it will be old, but may be
+            new now. In between GCs, objects in this state are definitely old. If the mark bit
+            is not set, then the object is actually old and white.
+        
+        DefinitelyNewAndWhite: The object was just allocated so it is white (not marked) and
+            new.
+        
+        DefinitelyGrey: The object is definitely grey - it will be rescanned in the future. It
+            may be new or old depending on other things.
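+
+        A sketch of the resulting enumerators (the numeric values and underlying type are
+        elided here; see heap/CellState.h for the real definition):
+
+            enum class CellState {
+                PossiblyOldOrBlack,
+                DefinitelyNewAndWhite,
+                DefinitelyGrey,
+            };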
+
+        * heap/CellState.h:
+        * heap/Heap.cpp:
+        (JSC::Heap::addToRememberedSet):
+        (JSC::Heap::writeBarrierSlowPath):
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::appendJSCellOrAuxiliary):
+        (JSC::SlotVisitor::setMarkedAndAppendToMarkStack):
+        (JSC::SlotVisitor::appendToMarkStack):
+        (JSC::SlotVisitor::visitChildren):
+        * runtime/JSCellInlines.h:
+        (JSC::JSCell::JSCell):
+        * runtime/StructureIDBlob.h:
+        (JSC::StructureIDBlob::StructureIDBlob):
+
+2016-12-16  Saam Barati  
+
+        B3::DoubleToFloatReduction will accidentally convince itself it converted a Phi from Double to Float and then convert uses of that Phi into a use of FloatToDouble(@Phi)
+        https://bugs.webkit.org/show_bug.cgi?id=165946
+
+        Reviewed by Keith Miller.
+
+        This was happening because the phase will convert some Phi nodes
+        from Double to Float. However, one place that did this conversion
+        forgot to first check if the Phi was already a Float. If it's already
+        a Float, a later part of the phase will be buggy if the phase claims that it has
+        converted it from Double->Float. The reason is that at the end of the
+        phase, we'll look for all uses of former Double Phi nodes and turn them
+        into a use of ConvertFloatToDouble on the Phi, instead of a use of the Phi itself.
+        This is clearly wrong if the Phi were Float to begin with (and
+        therefore, the uses were Float uses to begin with).
+
+        * b3/B3ReduceDoubleToFloat.cpp:
+        * b3/testb3.cpp:
+        (JSC::B3::testReduceFloatToDoubleValidates):
+        (JSC::B3::run):
+
+2016-12-16  Mark Lam  
+
+        De-duplicate finally blocks.
+        https://bugs.webkit.org/show_bug.cgi?id=160168
+
+        Reviewed by Keith Miller.
+
+        JS execution can arrive at a finally block when there are abrupt completions from
+        its try or catch block.  The abrupt completion types include Break,
+        Continue, Return, and Throw.  The non-abrupt completion type is called Normal
+        (i.e. the case of a try block falling through to the finally block).
+
+        Previously, we enabled each of these paths for abrupt completion (except for Throw)
+        to run the finally block code by duplicating the finally block code at each of
+        the sites that trigger those completions.  This patch fixes the implementation so
+        that each of these abrupt completions will set a finallyActionRegister (plus a
+        finallyReturnValueRegister for CompletionType::Return) and then jump to the
+        relevant finally blocks, and continue to thread through subsequent outer finally
+        blocks until execution reaches the outermost finally block that the completion
+        type dictates.  We no longer duplicate the finally block code.
+
+        The implementation details:
+        1. We allocate a pair of finallyActionRegister and finallyReturnValueRegister
+           just before entering the outermost try-catch-finally scope.
+
+           On allocating the registers, we set them to the empty JSValue.  This serves
+           to set the completion type to CompletionType::Normal (see (2) below).
+
+        2. The finallyActionRegister serves 2 purposes:
+           a. indicates the CompletionType that triggered entry into the finally block.
+
+              This is how we encode the completion type in the finallyActionRegister:
+              1. CompletionType::Normal
+                 - finallyActionRegister is set to the empty JSValue.
+              2. CompletionType::Break
+                 - finallyActionRegister is set to the int jumpID for the site of the break statement.
+              3. CompletionType::Continue
+                 - finallyActionRegister is set to the int jumpID for the site of the continue statement.
+              4. CompletionType::Return
+                 - finallyActionRegister is set to CompletionType::Return as an int JSValue.
+                 - finallyReturnValueRegister is set to the value to be returned. 
+              5. CompletionType::Throw
+                 - finallyActionRegister is set to the exception object that was caught by the finally block.
+
+              Hence, the finallyActionRegister can either be:
+              1. empty i.e. we're handling CompletionType::Normal.
+              2. an int JSValue i.e. we're handling CompletionType::Break, Continue, or Return.
+              3. an object i.e. we're handling CompletionType::Throw.
+
+           b. stores the exception caught in the finally block if we're handling
+              CompletionType::Throw.
+
+        3. Each finally block will have 2 entries:
+           a. the entry via throw.
+           b. the normal entry.
+
+           The entry via throw is recorded in the codeBlock's exception table, and can
+           only be jumped to by the VM's exception handling mechanism.
+
+           The normal entry is recorded in a FinallyContext (at bytecode generation time
+           only) and is jumped to when we want to enter the finally block due to any of the
+           other CompletionTypes.
+
+        4. CompletionType::Normal
+           ======================
+           We encounter this when falling through from a try or catch block to the finally block.  
+           
+           For the try block case, since finallyActionRegister is set to Normal by default,
+           there's nothing more that needs to be done.
+
+           For the catch block case, since we entered the catch block with an exception,
+           finallyActionRegister may be set to Throw.  We'll need to set it to Normal
+           before jumping to the finally block's normal entry.
+
+           CompletionType::Break
+           =====================
+           When we emit bytecode for the BreakNode, we check if we have any FinallyContexts
+           that we need to service before jumping to the breakTarget.  If we do, then:
+           a. we'll register a jumpID along with the breakTarget with the outermost FinallyContext.
+           b. we'll also increment the numberOfBreaksOrContinues count in each FinallyContext
+              from the innermost to the outermost.
+           c. instead of emitting bytecode to jump to the breakTarget, we:
+              1. emit bytecode to set finallyActionRegister to the jumpID.
+              2. emit bytecode to jump to the normal entry of the innermost finally block.
+
+           Each finally block will take care of cascading to the next outer finally block
+           as needed (see (5) below).
+
+           CompletionType::Continue
+           ========================
+           Since continues and breaks work the same way (i.e. with a jump), we handle this
+           exactly the same way as CompletionType::Break, except that we use the
+           continueTarget instead of the breakTarget.
+
+           CompletionType::Return
+           ======================
+           When we emit bytecode for the ReturnNode, we check if we have any FinallyContexts
+           at all on the m_controlFlowScopeStack.
+
+           If so, then instead of emitting op_ret, we:
+              1. emit bytecode to set finallyActionRegister to CompletionType::Return.
+              2. emit bytecode to move the return value into finallyReturnValueRegister.
+              3. emit bytecode to jump to the normal entry of the innermost finally block.
+
+           Each finally block will take care of cascading to the next outer finally block
+           as needed (see (5) below).
+
+           CompletionType::Throw
+           ======================
+           The op_catch of a finally block will always store the caught exception object
+           in the finallyActionRegister.  This means we're handling CompletionType::Throw
+           (see (2) above).
+
+        5. What happens in each finally block?
+           ==================================
+           Only the finally block's entry via throw will have an op_catch that catches the
+           pending exception (and stores it in the finallyActionRegister).  This throw
+           entry then falls through to the normal entry.
+
+           The finally block's normal entry will restore the scope of the finally block
+           and proceed to execute its code.
+
+           At the end of the finally block (see emitFinallyCompletion()), the finally
+           block will check the finallyActionRegister for each completion type in the
+           following order (a standalone sketch of this dispatch appears after this list):
+           
+           a. CompletionType::Normal: jump to the code after the finally block as
+              designated by a normalCompletion label.
+
+           b. CompletionType::Break and Continue:
+              If the FinallyContext for this block has registered FinallyJumps, we'll
+              check for the jumpIDs against the finallyActionRegister.  If the jumpID
+              matches, jump to the corresponding jumpTarget.
+
+              If no jumpIDs match but the FinallyContext's numberOfBreaksOrContinues is
+              greater than the number of registered FinallyJumps, then this means that
+              we have a Break or Continue that needs to be handled by an outer finally
+              block.  In that case, jump to the outer finally block's normal entry.
+              
+           c. CompletionType::Return:
+              If this finally block is not the outermost and finallyActionRegister contains
+              CompletionType::Return, then jump to the outer finally block's normal entry.
+
+              Otherwise, if this finally block is the outermost and finallyActionRegister
+              contains CompletionType::Return, then execute op_ret and return the value
+              in finallyReturnValueRegister.
+
+           d. CompletionType::Throw:
+              If we're not handling any of the above cases, then just throw the
+              finallyActionRegister which contains the exception to re-throw.
+
+        6. restoreScopeRegister()
+        
+           Since the needed scope objects are always stored in a local, we can restore
+           the scope register by simply moving from that local instead of going through
+           op_get_parent_scope.
+
+        7. m_controlFlowScopeStack needs to be a SegmentedVector instead of a Vector.
+           This makes it easier to keep a pointer to the FinallyContext on that stack,
+           and not have to worry about the vector being realloc'ed due to resizing. 
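+
+        A standalone model of the dispatch in (5) (plain C++ standing in for the emitted
+        bytecode; the encoding of the completion value and all names here are illustrative):
+
+            #include <cstdint>
+            #include <vector>
+
+            enum class Action { FallThrough, JumpToTarget, PropagateToOuterFinally, Return, Rethrow };
+
+            struct FinallyContextModel {
+                std::vector<uint32_t> registeredJumpIDs; // Break/Continue jumpIDs registered on this finally
+                uint32_t numberOfBreaksOrContinues { 0 };
+                bool isOutermost { false };
+            };
+
+            // completion: 0 models Normal, a small positive jumpID models Break/Continue,
+            // kReturn models Return, and kThrow models a caught exception.
+            constexpr uint32_t kReturn = 0xfffffffe;
+            constexpr uint32_t kThrow = 0xffffffff;
+
+            Action finallyCompletion(const FinallyContextModel& context, uint32_t completion)
+            {
+                if (!completion)
+                    return Action::FallThrough;                     // (a) Normal
+                if (completion != kReturn && completion != kThrow) {
+                    for (uint32_t jumpID : context.registeredJumpIDs) {
+                        if (jumpID == completion)
+                            return Action::JumpToTarget;            // (b) Break/Continue handled here
+                    }
+                    if (context.numberOfBreaksOrContinues > context.registeredJumpIDs.size())
+                        return Action::PropagateToOuterFinally;     // (b) handled by an outer finally
+                }
+                if (completion == kReturn)
+                    return context.isOutermost ? Action::Return : Action::PropagateToOuterFinally; // (c)
+                return Action::Rethrow;                             // (d) Throw: re-throw the exception
+            }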
+
+        Performance appears to be neutral both on ES6SampleBench (run via cli) and the
+        JSC benchmarks.
+
+        Relevant spec references:
+        https://tc39.github.io/ecma262/#sec-completion-record-specification-type
+        https://tc39.github.io/ecma262/#sec-try-statement-runtime-semantics-evaluation
+
+        * bytecode/HandlerInfo.h:
+        (JSC::HandlerInfoBase::typeName):
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::generate):
+        (JSC::BytecodeGenerator::BytecodeGenerator):
+        (JSC::BytecodeGenerator::emitReturn):
+        (JSC::BytecodeGenerator::pushFinallyControlFlowScope):
+        (JSC::BytecodeGenerator::popFinallyControlFlowScope):
+        (JSC::BytecodeGenerator::allocateAndEmitScope):
+        (JSC::BytecodeGenerator::pushTry):
+        (JSC::BytecodeGenerator::popTry):
+        (JSC::BytecodeGenerator::emitCatch):
+        (JSC::BytecodeGenerator::restoreScopeRegister):
+        (JSC::BytecodeGenerator::labelScopeDepthToLexicalScopeIndex):
+        (JSC::BytecodeGenerator::labelScopeDepth):
+        (JSC::BytecodeGenerator::pushLocalControlFlowScope):
+        (JSC::BytecodeGenerator::popLocalControlFlowScope):
+        (JSC::BytecodeGenerator::emitEnumeration):
+        (JSC::BytecodeGenerator::emitIsNumber):
+        (JSC::BytecodeGenerator::emitYield):
+        (JSC::BytecodeGenerator::emitDelegateYield):
+        (JSC::BytecodeGenerator::emitJumpViaFinallyIfNeeded):
+        (JSC::BytecodeGenerator::emitReturnViaFinallyIfNeeded):
+        (JSC::BytecodeGenerator::emitFinallyCompletion):
+        (JSC::BytecodeGenerator::allocateFinallyRegisters):
+        (JSC::BytecodeGenerator::releaseFinallyRegisters):
+        (JSC::BytecodeGenerator::emitCompareFinallyActionAndJumpIf):
+        (JSC::BytecodeGenerator::pushIteratorCloseControlFlowScope): Deleted.
+        (JSC::BytecodeGenerator::popIteratorCloseControlFlowScope): Deleted.
+        (JSC::BytecodeGenerator::emitComplexPopScopes): Deleted.
+        (JSC::BytecodeGenerator::emitPopScopes): Deleted.
+        (JSC::BytecodeGenerator::popTryAndEmitCatch): Deleted.
+        * bytecompiler/BytecodeGenerator.h:
+        (JSC::FinallyJump::FinallyJump):
+        (JSC::FinallyContext::FinallyContext):
+        (JSC::FinallyContext::outerContext):
+        (JSC::FinallyContext::finallyLabel):
+        (JSC::FinallyContext::depth):
+        (JSC::FinallyContext::numberOfBreaksOrContinues):
+        (JSC::FinallyContext::incNumberOfBreaksOrContinues):
+        (JSC::FinallyContext::handlesReturns):
+        (JSC::FinallyContext::setHandlesReturns):
+        (JSC::FinallyContext::registerJump):
+        (JSC::FinallyContext::numberOfJumps):
+        (JSC::FinallyContext::jumps):
+        (JSC::ControlFlowScope::ControlFlowScope):
+        (JSC::ControlFlowScope::isLabelScope):
+        (JSC::ControlFlowScope::isFinallyScope):
+        (JSC::BytecodeGenerator::currentLexicalScopeIndex):
+        (JSC::BytecodeGenerator::FinallyRegistersScope::FinallyRegistersScope):
+        (JSC::BytecodeGenerator::FinallyRegistersScope::~FinallyRegistersScope):
+        (JSC::BytecodeGenerator::finallyActionRegister):
+        (JSC::BytecodeGenerator::finallyReturnValueRegister):
+        (JSC::BytecodeGenerator::emitSetFinallyActionToNormalCompletion):
+        (JSC::BytecodeGenerator::emitSetFinallyActionToReturnCompletion):
+        (JSC::BytecodeGenerator::emitSetFinallyActionToJumpID):
+        (JSC::BytecodeGenerator::emitSetFinallyReturnValueRegister):
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNormalCompletion):
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotJump):
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsReturnCompletion):
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotReturnCompletion):
+        (JSC::BytecodeGenerator::emitJumpIfFinallyActionIsNotThrowCompletion):
+        (JSC::BytecodeGenerator::emitJumpIfCompletionTypeIsThrow):
+        (JSC::BytecodeGenerator::bytecodeOffsetToJumpID):
+        (JSC::BytecodeGenerator::isInFinallyBlock): Deleted.
+        * bytecompiler/NodesCodegen.cpp:
+        (JSC::ContinueNode::emitBytecode):
+        (JSC::BreakNode::emitBytecode):
+        (JSC::ReturnNode::emitBytecode):
+        (JSC::TryNode::emitBytecode):
+
+2016-12-16  Keith Miller  
+
+        Add missing cases to parseUnreachableExpression and cleanup FunctionParser
+        https://bugs.webkit.org/show_bug.cgi?id=165966
+
+        Reviewed by Saam Barati.
+
+        This patch adds a number of missing cases to the Wasm FunctionParser's unreachable
+        code decoder. It also removes unneeded OpType namespace qualifiers and has the
+        unary / binary macros cover all the cases rather than just the simple cases.
+
+        * wasm/WasmFunctionParser.h:
+
+2016-12-16  Mark Lam  
+
+        Add predecessor info to dumps from JSC_dumpBytecodeLivenessResults=true.
+        https://bugs.webkit.org/show_bug.cgi?id=165958
+
+        Reviewed by Saam Barati.
+
+        Also:
+        1. refactored the code to use a common lambda function to dump FastBitVectors.
+        2. listed successors by their block index instead of pointers.
+
+        * bytecode/BytecodeLivenessAnalysis.cpp:
+        (JSC::BytecodeLivenessAnalysis::dumpResults):
+
+2016-12-16  Saam Barati  
+
+        WebAssembly: WasmB3IRGenerator should throw exceptions instead of crash
+        https://bugs.webkit.org/show_bug.cgi?id=165834
+
+        Reviewed by Keith Miller.
+
+        This patch generalizes how we throw exceptions in the Wasm::B3IRGenerator.
+        There are still places where we need to throw exceptions and we don't, but
+        this patch removes most of those places inside the IR generator. There are
+        still a few places we need to throw exceptions inside the IR generator, like
+        div/mod by 0. Those will be done in a separate patch. Also, there are
+        still some stubs we need to throw exceptions from; those will also be
+        done in a separate patch.
+
+        All exceptions thrown from Wasm share a common stub. The ABI for the stub
+        is to move the Wasm::ExceptionType into argGPR1 and jump to the stub.
+        The stub will then throw an exception with an error message tailored
+        to the particular Wasm::ExceptionType failure.
+
+        This patch also refactors B3::Compilation. Before, the B3::Compilation(VM, Procedure)
+        constructor would compile a B3 function. This patch makes B3::Compilation a simple
+        tuple that keeps the necessary bits of a B3 function alive in order to be runnable.
+        There is a new function that actually does the compilation for you:
+        Compilation B3::compile(VM&, Procedure&)
+        The reason for this change is that I'm now using the B3::Compilation(CodeRef, OpaqueByproducts)
+        constructor in Wasm code. It is weird for a class to have both a
+        constructor that instantiates the tuple and another that performs the
+        compilation and then instantiates the tuple. It's more straightforward
+        if Compilation's job isn't to actually do the compilation but just to
+        hold the necessary bits to keep a compiled B3 function alive.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * b3/B3Compilation.cpp:
+        (JSC::B3::Compilation::Compilation):
+        * b3/B3Compilation.h:
+        * b3/B3Compile.cpp: Added.
+        (JSC::B3::compile):
+        * b3/B3Compile.h: Added.
+        * b3/testb3.cpp:
+        (JSC::B3::compile):
+        * jit/ThunkGenerators.cpp:
+        (JSC::throwExceptionFromWasmThunkGenerator):
+        * jit/ThunkGenerators.h:
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+        (JSC::Wasm::B3IRGenerator::emitExceptionCheck):
+        (JSC::Wasm::createJSToWasmWrapper):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmExceptionType.h: Added.
+        (JSC::Wasm::errorMessageForExceptionType):
+
+2016-12-16  Keith Miller  
+
+        i64.eqz should use an Int64 zero
+        https://bugs.webkit.org/show_bug.cgi?id=165942
+
+        Reviewed by Mark Lam.
+
+        This patch fixes i64.eqz, which was previously using an Int32 zero
+        for the comparison. It also adds printing of opcode names in
+        verbose mode.
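+
+        For reference, i64.eqz semantics as a standalone helper (the comparison must be
+        against a 64-bit zero; e.g. an operand of 0x100000000 yields 0, not 1):
+
+            #include <cstdint>
+
+            static int32_t i64Eqz(int64_t operand)
+            {
+                return operand == int64_t { 0 } ? 1 : 0;
+            }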
+
+        * wasm/WasmFunctionParser.h:
+        * wasm/generateWasmOpsHeader.py:
+        * wasm/wasm.json:
+
+2016-12-15  Darin Adler  
+
+        Use asString instead of toWTFString, toString, or getString when we already checked isString
+        https://bugs.webkit.org/show_bug.cgi?id=165895
+
+        Reviewed by Yusuke Suzuki.
+
+        Once we have called isString, we should always use asString and value rather than using
+        functions that have to deal with non-JSString objects. This leads to slightly fewer branches
+        and slightly less reference count churn, since the string is stored right inside the JSString,
+        and it obviates the need for exception handling.
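+
+        The pattern being adopted, as a hedged sketch (JSC types; not a standalone
+        program, and the helper name is made up for illustration):
+
+            static String stringValueIfString(ExecState* exec, JSValue value)
+            {
+                if (!value.isString())
+                    return String();
+                // asString() is the cheap cast that is valid once isString() is known to be
+                // true; JSString::value() then reads the string stored in the cell directly.
+                return asString(value)->value(exec);
+            }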
+
+        * bindings/ScriptValue.cpp:
+        (Inspector::jsToInspectorValue): Use asString/value instead of getString.
+        * dfg/DFGOperations.cpp:
+        (JSC::DFG::operationMapHash): Call jsMapHash with its new arguments.
+        * inspector/JSInjectedScriptHost.cpp:
+        (Inspector::JSInjectedScriptHost::evaluateWithScopeExtension): Use asString/value instead
+        of toWTFString.
+        * inspector/JSJavaScriptCallFrame.cpp:
+        (Inspector::JSJavaScriptCallFrame::evaluateWithScopeExtension): Ditto.
+        * inspector/agents/InspectorHeapAgent.cpp:
+        (Inspector::InspectorHeapAgent::getPreview): Use asString/tryGetValue, instead of the
+        peculiar getString(nullptr) that was here before.
+        * jsc.cpp:
+        (functionGetGetterSetter): Use asString/toIdentifier instead of the much less efficient
+        toWTFString/Identifier::fromString.
+        (functionIsRope): Use asString instead of jsCast; same thing, but we should
+        prefer the asString function, since it exists.
+        (functionFindTypeForExpression): Use asString/value instead of getString.
+        (functionHasBasicBlockExecuted): Ditto.
+        (functionBasicBlockExecutionCount): Ditto.
+        (functionCreateBuiltin): Use asString/value instead of toWTFString and removed
+        unneeded RETURN_IF_EXCEPTION.
+        (valueWithTypeOfWasmValue): Use asString instead of jsCast.
+        (box): Ditto.
+        * runtime/DateConstructor.cpp:
+        (JSC::constructDate): Use asString/values instead of getString.
+        * runtime/ExceptionHelpers.cpp:
+        (JSC::errorDescriptionForValue): Tweaked formatting.
+
+        * runtime/HashMapImpl.h:
+        (JSC::jsMapHash): Changed this function to use asString/value.
+
+        * runtime/JSCJSValue.cpp:
+        (JSC::JSValue::dumpInContextAssumingStructure): Use asString instead of
+        jsCast.
+        (JSC::JSValue::dumpForBacktrace): Ditto.
+        * runtime/JSCJSValueInlines.h:
+        (JSC::toPreferredPrimitiveType): Ditto.
+
+        * runtime/JSGlobalObjectFunctions.cpp:
+        (JSC::globalFuncEval): Use asString/value instead of toWTFString.
+
+        * runtime/JSString.cpp:
+        (JSC::JSString::destroy): Streamlined by removing local variable.
+        (JSC::JSString::estimatedSize): Use asString instead of jsCast.
+        (JSC::JSString::visitChildren): Ditto.
+        (JSC::JSString::toThis): Ditto.
+        * runtime/JSString.h:
+        (JSC::JSValue::toString): Ditto.
+        (JSC::JSValue::toStringOrNull): Ditto.
+        * runtime/NumberPrototype.cpp:
+        (JSC::numberProtoFuncValueOf): Ditto.
+        * runtime/ObjectPrototype.cpp:
+        (JSC::objectProtoFuncToString): Ditto.
+        * runtime/StringPrototype.cpp:
+        (JSC::stringProtoFuncRepeatCharacter): Ditto.
+        (JSC::stringProtoFuncSubstr): Ditto.
+        (JSC::builtinStringSubstrInternal): Simplified assertion by removing local variable.
+
+2016-12-15  Keith Miller  
+
+        Fix validation of non-void if blocks with no else
+        https://bugs.webkit.org/show_bug.cgi?id=165938
+
+        Reviewed by Saam Barati.
+
+        We should not have been allowing non-void if-blocks that don't
+        have an else, since that would leave a value on the stack that
+        only appears under one control-flow path and not the other.
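+
+        The rule being enforced is roughly the following (a standalone sketch with
+        made-up types, not the actual WasmValidate.cpp code):
+
+            enum class Type { Void, I32, I64, F32, F64 };
+            struct IfBlock { Type signature; bool hasElse; };
+
+            bool validateIf(const IfBlock& block)
+            {
+                // A non-void if must have an else: otherwise only the taken arm
+                // would push the block's result value onto the expression stack.
+                if (block.signature != Type::Void && !block.hasElse)
+                    return false;
+                return true;
+            }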
+
+        * wasm/WasmValidate.cpp:
+
+2016-12-15  Filip Pizlo  
+
+        Get rid of HeapRootVisitor and make SlotVisitor less painful to use
+        https://bugs.webkit.org/show_bug.cgi?id=165911
+
+        Reviewed by Geoffrey Garen.
+        
+        Previously we had two ways of adding a raw pointer to the GC's mark stack:
+        
+        - SlotVisitor::appendUnbarrieredXYZ() methods
+        - HeapRootVisitor::visit() methods
+        
+        HeapRootVisitor existed only to prevent you from calling its non-WriteBarrier<> methods
+        unless you had permission. But SlotVisitor would let you do it anyway, because that was
+        a lot more practical.
+        
+        I think that we should just have one way to do it. This removes HeapRootVisitor. It
+        also renames appendUnbarrieredXYZ to appendUnbarriered, and it removes the use of extra
+        indirection (so you now pass const WriteBarrier<>& instead of WriteBarrier<>*).
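+
+        After this change, a visitChildren() roughly looks like this (an illustrative
+        sketch rather than a specific call site; Thing and its members are made up):
+
+            void Thing::visitChildren(JSCell* cell, SlotVisitor& visitor)
+            {
+                Thing* thisObject = jsCast<Thing*>(cell);
+                Base::visitChildren(thisObject, visitor);
+                // WriteBarrier<> members are now passed by reference.
+                visitor.append(thisObject->m_barrieredField);
+                // One name for all of the raw pointer/value flavors.
+                visitor.appendUnbarriered(thisObject->m_rawValue);
+            }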
+
+        * API/JSCallbackObject.h:
+        (JSC::JSCallbackObjectData::JSPrivatePropertyMap::visitChildren):
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * Scripts/builtins/builtins_templates.py:
+        * bytecode/CodeBlock.cpp:
+        (JSC::CodeBlock::visitWeakly):
+        (JSC::CodeBlock::visitChildren):
+        (JSC::CodeBlock::propagateTransitions):
+        (JSC::CodeBlock::determineLiveness):
+        (JSC::CodeBlock::visitOSRExitTargets):
+        (JSC::CodeBlock::stronglyVisitStrongReferences):
+        (JSC::CodeBlock::stronglyVisitWeakReferences):
+        * bytecode/DirectEvalCodeCache.cpp:
+        (JSC::DirectEvalCodeCache::visitAggregate):
+        * bytecode/InternalFunctionAllocationProfile.h:
+        (JSC::InternalFunctionAllocationProfile::visitAggregate):
+        * bytecode/ObjectAllocationProfile.h:
+        (JSC::ObjectAllocationProfile::visitAggregate):
+        * bytecode/PolymorphicAccess.cpp:
+        (JSC::AccessCase::propagateTransitions):
+        * bytecode/UnlinkedCodeBlock.cpp:
+        (JSC::UnlinkedCodeBlock::visitChildren):
+        * bytecode/UnlinkedFunctionExecutable.cpp:
+        (JSC::UnlinkedFunctionExecutable::visitChildren):
+        * debugger/DebuggerScope.cpp:
+        (JSC::DebuggerScope::visitChildren):
+        * dfg/DFGDesiredTransitions.cpp:
+        (JSC::DFG::DesiredTransition::visitChildren):
+        * dfg/DFGDesiredWeakReferences.cpp:
+        (JSC::DFG::DesiredWeakReferences::visitChildren):
+        * dfg/DFGGraph.cpp:
+        (JSC::DFG::Graph::visitChildren):
+        * dfg/DFGPlan.cpp:
+        (JSC::DFG::Plan::markCodeBlocks):
+        (JSC::DFG::Plan::checkLivenessAndVisitChildren):
+        * heap/HandleSet.cpp:
+        (JSC::HandleSet::visitStrongHandles):
+        * heap/HandleSet.h:
+        * heap/HandleStack.cpp:
+        (JSC::HandleStack::visit):
+        * heap/HandleStack.h:
+        * heap/Heap.cpp:
+        (JSC::Heap::markToFixpoint):
+        * heap/Heap.h:
+        * heap/HeapRootVisitor.h: Removed.
+        * heap/LargeAllocation.cpp:
+        (JSC::LargeAllocation::visitWeakSet):
+        * heap/LargeAllocation.h:
+        * heap/MarkedBlock.h:
+        (JSC::MarkedBlock::Handle::visitWeakSet):
+        * heap/MarkedSpace.cpp:
+        (JSC::MarkedSpace::visitWeakSets):
+        * heap/MarkedSpace.h:
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::appendUnbarriered):
+        * heap/SlotVisitor.h:
+        * heap/SlotVisitorInlines.h:
+        (JSC::SlotVisitor::appendUnbarriered):
+        (JSC::SlotVisitor::append):
+        (JSC::SlotVisitor::appendHidden):
+        (JSC::SlotVisitor::appendValues):
+        (JSC::SlotVisitor::appendValuesHidden):
+        (JSC::SlotVisitor::appendUnbarrieredPointer): Deleted.
+        (JSC::SlotVisitor::appendUnbarrieredReadOnlyPointer): Deleted.
+        (JSC::SlotVisitor::appendUnbarrieredValue): Deleted.
+        (JSC::SlotVisitor::appendUnbarrieredReadOnlyValue): Deleted.
+        (JSC::SlotVisitor::appendUnbarrieredWeak): Deleted.
+        * heap/WeakBlock.cpp:
+        (JSC::WeakBlock::specializedVisit):
+        (JSC::WeakBlock::visit):
+        * heap/WeakBlock.h:
+        * heap/WeakSet.h:
+        (JSC::WeakSet::visit):
+        * interpreter/ShadowChicken.cpp:
+        (JSC::ShadowChicken::visitChildren):
+        * jit/GCAwareJITStubRoutine.cpp:
+        (JSC::MarkingGCAwareJITStubRoutine::markRequiredObjectsInternal):
+        * jit/PolymorphicCallStubRoutine.cpp:
+        (JSC::PolymorphicCallStubRoutine::markRequiredObjectsInternal):
+        * jsc.cpp:
+        (WTF::Element::visitChildren):
+        (WTF::ImpureGetter::visitChildren):
+        (WTF::SimpleObject::visitChildren):
+        * runtime/AbstractModuleRecord.cpp:
+        (JSC::AbstractModuleRecord::visitChildren):
+        * runtime/ArgList.cpp:
+        (JSC::MarkedArgumentBuffer::markLists):
+        * runtime/ArgList.h:
+        * runtime/ClonedArguments.cpp:
+        (JSC::ClonedArguments::visitChildren):
+        * runtime/DirectArguments.cpp:
+        (JSC::DirectArguments::visitChildren):
+        * runtime/EvalExecutable.cpp:
+        (JSC::EvalExecutable::visitChildren):
+        * runtime/Exception.cpp:
+        (JSC::Exception::visitChildren):
+        * runtime/FunctionExecutable.cpp:
+        (JSC::FunctionExecutable::visitChildren):
+        * runtime/FunctionRareData.cpp:
+        (JSC::FunctionRareData::visitChildren):
+        * runtime/GetterSetter.cpp:
+        (JSC::GetterSetter::visitChildren):
+        * runtime/HashMapImpl.cpp:
+        (JSC::HashMapBucket::visitChildren):
+        (JSC::HashMapImpl::visitChildren):
+        * runtime/InferredTypeTable.cpp:
+        (JSC::InferredTypeTable::visitChildren):
+        * runtime/InternalFunction.cpp:
+        (JSC::InternalFunction::visitChildren):
+        * runtime/IntlCollator.cpp:
+        (JSC::IntlCollator::visitChildren):
+        * runtime/IntlCollatorConstructor.cpp:
+        (JSC::IntlCollatorConstructor::visitChildren):
+        * runtime/IntlDateTimeFormat.cpp:
+        (JSC::IntlDateTimeFormat::visitChildren):
+        * runtime/IntlDateTimeFormatConstructor.cpp:
+        (JSC::IntlDateTimeFormatConstructor::visitChildren):
+        * runtime/IntlNumberFormat.cpp:
+        (JSC::IntlNumberFormat::visitChildren):
+        * runtime/IntlNumberFormatConstructor.cpp:
+        (JSC::IntlNumberFormatConstructor::visitChildren):
+        * runtime/JSBoundFunction.cpp:
+        (JSC::JSBoundFunction::visitChildren):
+        * runtime/JSCallee.cpp:
+        (JSC::JSCallee::visitChildren):
+        * runtime/JSCellInlines.h:
+        (JSC::JSCell::visitChildren):
+        * runtime/JSCustomGetterSetterFunction.cpp:
+        (JSC::JSCustomGetterSetterFunction::visitChildren):
+        * runtime/JSFunction.cpp:
+        (JSC::JSFunction::visitChildren):
+        * runtime/JSGlobalObject.cpp:
+        (JSC::JSGlobalObject::visitChildren):
+        * runtime/JSMapIterator.cpp:
+        (JSC::JSMapIterator::visitChildren):
+        * runtime/JSModuleEnvironment.cpp:
+        (JSC::JSModuleEnvironment::visitChildren):
+        * runtime/JSModuleNamespaceObject.cpp:
+        (JSC::JSModuleNamespaceObject::visitChildren):
+        * runtime/JSModuleRecord.cpp:
+        (JSC::JSModuleRecord::visitChildren):
+        * runtime/JSNativeStdFunction.cpp:
+        (JSC::JSNativeStdFunction::visitChildren):
+        * runtime/JSObject.cpp:
+        (JSC::JSObject::visitButterflyImpl):
+        * runtime/JSPromiseDeferred.cpp:
+        (JSC::JSPromiseDeferred::visitChildren):
+        * runtime/JSPropertyNameEnumerator.cpp:
+        (JSC::JSPropertyNameEnumerator::visitChildren):
+        * runtime/JSPropertyNameIterator.cpp:
+        (JSC::JSPropertyNameIterator::visitChildren):
+        * runtime/JSProxy.cpp:
+        (JSC::JSProxy::visitChildren):
+        * runtime/JSScope.cpp:
+        (JSC::JSScope::visitChildren):
+        * runtime/JSSegmentedVariableObject.cpp:
+        (JSC::JSSegmentedVariableObject::visitChildren):
+        * runtime/JSSetIterator.cpp:
+        (JSC::JSSetIterator::visitChildren):
+        * runtime/JSString.cpp:
+        (JSC::JSRopeString::visitFibers):
+        * runtime/JSSymbolTableObject.cpp:
+        (JSC::JSSymbolTableObject::visitChildren):
+        * runtime/JSWeakMap.cpp:
+        (JSC::JSWeakMap::visitChildren):
+        * runtime/JSWeakSet.cpp:
+        (JSC::JSWeakSet::visitChildren):
+        * runtime/JSWithScope.cpp:
+        (JSC::JSWithScope::visitChildren):
+        * runtime/JSWrapperObject.cpp:
+        (JSC::JSWrapperObject::visitChildren):
+        * runtime/LazyClassStructure.cpp:
+        (JSC::LazyClassStructure::visit):
+        * runtime/LazyPropertyInlines.h:
+        (JSC::ElementType>::visit):
+        * runtime/MapBase.cpp:
+        (JSC::MapBase::visitChildren):
+        * runtime/ModuleProgramExecutable.cpp:
+        (JSC::ModuleProgramExecutable::visitChildren):
+        * runtime/NativeErrorConstructor.cpp:
+        (JSC::NativeErrorConstructor::visitChildren):
+        * runtime/ProgramExecutable.cpp:
+        (JSC::ProgramExecutable::visitChildren):
+        * runtime/ProxyObject.cpp:
+        (JSC::ProxyObject::visitChildren):
+        * runtime/ProxyRevoke.cpp:
+        (JSC::ProxyRevoke::visitChildren):
+        * runtime/RegExpCachedResult.cpp:
+        (JSC::RegExpCachedResult::visitChildren):
+        * runtime/RegExpObject.cpp:
+        (JSC::RegExpObject::visitChildren):
+        * runtime/RegExpPrototype.cpp:
+        (JSC::RegExpPrototype::visitChildren):
+        * runtime/SamplingProfiler.cpp:
+        (JSC::SamplingProfiler::visit):
+        * runtime/ScopedArguments.cpp:
+        (JSC::ScopedArguments::visitChildren):
+        * runtime/SmallStrings.cpp:
+        (JSC::SmallStrings::visitStrongReferences):
+        * runtime/SparseArrayValueMap.cpp:
+        (JSC::SparseArrayValueMap::visitChildren):
+        * runtime/Structure.cpp:
+        (JSC::Structure::visitChildren):
+        (JSC::Structure::markIfCheap):
+        * runtime/StructureChain.cpp:
+        (JSC::StructureChain::visitChildren):
+        * runtime/StructureRareData.cpp:
+        (JSC::StructureRareData::visitChildren):
+        * runtime/SymbolTable.cpp:
+        (JSC::SymbolTable::visitChildren):
+        * runtime/TypeProfilerLog.cpp:
+        (JSC::TypeProfilerLog::visit):
+        * runtime/WeakMapData.cpp:
+        (JSC::WeakMapData::DeadKeyCleaner::visitWeakReferences):
+        * wasm/js/JSWebAssemblyInstance.cpp:
+        (JSC::JSWebAssemblyInstance::visitChildren):
+        * wasm/js/JSWebAssemblyMemory.cpp:
+        (JSC::JSWebAssemblyMemory::visitChildren):
+        * wasm/js/JSWebAssemblyModule.cpp:
+        (JSC::JSWebAssemblyModule::visitChildren):
+        * wasm/js/JSWebAssemblyTable.cpp:
+        (JSC::JSWebAssemblyTable::visitChildren):
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::WebAssemblyFunction::visitChildren):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::visitChildren):
+
+2016-12-15  Myles C. Maxfield  
+
+        Sort Xcode project files
+        https://bugs.webkit.org/show_bug.cgi?id=165937
+
+        Reviewed by Simon Fraser.
+
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+
+2016-12-15  Keith Miller  
+
+        Wasm should not create empty unlinked callsites
+        https://bugs.webkit.org/show_bug.cgi?id=165933
+
+        Reviewed by Mark Lam.
+
+        Wasm would create holes in the unlinked callsite vector if B3 was able to
+        eliminate the callsite.
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::addCall):
+
+2016-12-15  JF Bastien  
+
+        WebAssembly: improve compilation error messages
+        https://bugs.webkit.org/show_bug.cgi?id=163919
+
+        Reviewed by Saam Barati.
+
+        The error handling messages were underwhelming because most
+        locations merely returned `false` on failure. This patch uses
+        std::expected to denote that failure isn't expected. Doing this
+        makes it almost impossible to mess up the code: either a function
+        returns a result (or a partial result for internal helpers) or an
+        error. We're not synchronizing the error string with the m_failed
+        bool anymore, and the caller will abort if they try to get a
+        result but the outcome was an error.
+
+        This also shortens the code significantly using macros, while also
+        judiciously preventing inlining of error handling code and biasing
+        towards success using UNLIKELY. This means that the generated code
+        should be more efficient (no string formatting on success, and
+        regalloc can avoid these unlikely paths).
+
+        The patch adds a few missing checks as well, especially related to
+        count limits and memory allocation failure.
+
+        As a follow-up I'd like to improve WTF::makeString further, so it
+        does coercions to string and understands ADL as I did in this
+        patch.
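+
+        The shape of the change, as a self-contained sketch (Result and the parser
+        below are illustrative stand-ins, not the actual WTF/Wasm types):
+
+            #include <cstddef>
+            #include <cstdint>
+            #include <string>
+
+            template<typename T>
+            struct Result {
+                bool ok;
+                T value;            // valid only when ok
+                std::string error;  // valid only when !ok
+            };
+
+            // Instead of returning false, a parser helper returns either a value or
+            // a message explaining what went wrong.
+            Result<uint8_t> parseUInt8(const uint8_t* data, size_t size, size_t& offset)
+            {
+                if (offset >= size)
+                    return { false, 0, "can't read a byte past the end of the module" };
+                return { true, data[offset++], {} };
+            }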
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::fail):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmB3IRGenerator.h:
+        * wasm/WasmFormat.h:
+        (JSC::Wasm::isValidExternalKind):
+        (JSC::Wasm::makeString):
+        * wasm/WasmFunctionParser.h:
+        * wasm/WasmModuleParser.cpp:
+        * wasm/WasmModuleParser.h:
+        * wasm/WasmParser.h:
+        (JSC::Wasm::FailureHelper::makeString):
+        (JSC::Wasm::Parser::fail):
+        (JSC::Wasm::Parser::Parser):
+        (JSC::Wasm::Parser::consumeCharacter):
+        (JSC::Wasm::Parser::consumeString):
+        (JSC::Wasm::Parser::consumeUTF8String):
+        (JSC::Wasm::Parser::parseVarUInt32):
+        (JSC::Wasm::Parser::parseVarUInt64):
+        (JSC::Wasm::Parser::parseVarInt32):
+        (JSC::Wasm::Parser::parseVarInt64):
+        (JSC::Wasm::Parser::parseUInt32):
+        (JSC::Wasm::Parser::parseUInt64):
+        (JSC::Wasm::Parser::parseUInt8):
+        (JSC::Wasm::Parser::parseInt7):
+        (JSC::Wasm::Parser::parseUInt7):
+        (JSC::Wasm::Parser::parseVarUInt1):
+        (JSC::Wasm::Parser::parseResultType):
+        (JSC::Wasm::Parser::parseValueType):
+        (JSC::Wasm::Parser::parseExternalKind):
+        * wasm/WasmPlan.cpp:
+        (JSC::Wasm::Plan::run):
+        * wasm/WasmSections.h:
+        (JSC::Wasm::isValidSection):
+        (JSC::Wasm::validateOrder):
+        (JSC::Wasm::makeString):
+        * wasm/WasmValidate.cpp:
+        (JSC::Wasm::Validate::fail):
+        (JSC::Wasm::Validate::addUnreachable):
+        (JSC::Wasm::validateFunction):
+        * wasm/WasmValidate.h:
+        * wasm/generateWasmB3IRGeneratorInlinesHeader.py:
+        * wasm/generateWasmOpsHeader.py:
+        * wasm/generateWasmValidateInlinesHeader.py:
+        (loadMacro):
+        (storeMacro):
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        (JSC::constructJSWebAssemblyInstance):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::link):
+
+2016-12-15  JF Bastien  
+
+        WebAssembly API: improve data section errors, initialize after Element
+        https://bugs.webkit.org/show_bug.cgi?id=165733
+
+        Reviewed by Keith Miller.
+
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseData): Data section without Memory section or import is a validation error
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::dataSegmentFail):
+        (JSC::WebAssemblyModuleRecord::evaluate): tighten checks (though the spec isn't fully baked), and move after Element initialization
+
+2016-12-15  Keith Miller  
+
+        Turn on WebAssembly by default
+        https://bugs.webkit.org/show_bug.cgi?id=165918
+
+        Reviewed by Saam Barati.
+
+        * runtime/Options.h:
+
+2016-12-15  Konstantin Tokarev  
+
+        Added missing override and final specifiers
+        https://bugs.webkit.org/show_bug.cgi?id=165903
+
+        Reviewed by Darin Adler.
+
+        * bytecompiler/BytecodeGenerator.h:
+        * jsc.cpp:
+        * parser/Nodes.h:
+
+2016-12-15  Chris Dumez  
+
+        Harden JSObject::getOwnPropertyDescriptor()
+        https://bugs.webkit.org/show_bug.cgi?id=165908
+
+        Reviewed by Geoffrey Garen.
+
+        * runtime/JSObject.cpp:
+        (JSC::JSObject::getOwnPropertyDescriptor):
+
+2016-12-15  Keith Miller  
+
+        Fix 64-bit shift family Wasm opcodes
+        https://bugs.webkit.org/show_bug.cgi?id=165902
+
+        Reviewed by Geoffrey Garen.
+
+        The Int64 versions of the shift family B3 opcodes take an Int32
+        for the shift value. Wasm, however, takes an i64, so we need to
+        Trunc the shift value. Also, this fixes a bug where shr_u was mapped
+        to the signed shift and shr_s was mapped to the unsigned shift.
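+
+        For the record, the semantics being implemented are roughly these (a standalone
+        illustration of the lowering, not the B3IRGenerator code):
+
+            #include <cstdint>
+
+            // wasm shift counts are i64 but only the low 6 bits matter, so the count
+            // is truncated to 32 bits before being handed to B3's Int64 shift.
+            uint64_t wasmI64Shl(uint64_t value, uint64_t count)
+            {
+                uint32_t truncated = static_cast<uint32_t>(count); // the Trunc
+                return value << (truncated & 63);
+            }
+
+            // shr_s is the arithmetic (sign-preserving) shift, shr_u the logical one.
+            int64_t wasmI64ShrS(int64_t value, uint64_t count)
+            {
+                return value >> (static_cast<uint32_t>(count) & 63); // assumes the usual arithmetic >> for signed values
+            }
+
+            uint64_t wasmI64ShrU(uint64_t value, uint64_t count)
+            {
+                return value >> (static_cast<uint32_t>(count) & 63);
+            }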
+
+        * wasm/wasm.json:
+
+2016-12-14  Keith Miller  
+
+        Wasm should decode constants correctly
+        https://bugs.webkit.org/show_bug.cgi?id=165886
+
+        Reviewed by Saam Barati.
+
+        This patch fixes how we decode the constant part of i32, i64, f32,
+        and f64.const opcodes.
+
+        * wasm/WasmFunctionParser.h:
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/wasm.json:
+
+2016-12-14  Saam Barati  
+
+        WebAssembly: Add various low hanging fruit that will allow us to run the LLVM torture tests in Wasm
+        https://bugs.webkit.org/show_bug.cgi?id=165883
+
+        Reviewed by Keith Miller.
+
+        This patch implements some low-hanging fruit:
+        - Exporting Table
+        - Exporting Memory
+        - Load16 with zero extension to both 32 and 64 bit values.
+        - Fixes Unreachable to emit code that will prevent B3 from having a validation error.
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::addUnreachable):
+        (JSC::Wasm::sizeOfLoadOp):
+        (JSC::Wasm::B3IRGenerator::emitLoadOp):
+        * wasm/WasmFunctionParser.h:
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseExport):
+        * wasm/WasmValidate.cpp:
+        (JSC::Wasm::Validate::addUnreachable):
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        (JSC::constructJSWebAssemblyInstance):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::finishCreation):
+        (JSC::WebAssemblyModuleRecord::link):
+
+2016-12-14  Yusuke Suzuki  
+
+        Update ModuleLoader code by using the latest builtin primitives
+        https://bugs.webkit.org/show_bug.cgi?id=165851
+
+        Reviewed by Sam Weinig.
+
+        Update the module loader code:
+
+        1. Use @globalPrivate for the utilities, instead of setting them as members of ModuleLoader.
+        2. Use @putByValDirect instead of @push. @push is user-observable since it uses the Set() operation,
+           and it can be observed by defining indexed setters on Array.prototype.
+
+        * builtins/ModuleLoaderPrototype.js:
+        (ensureRegistered):
+        (fulfillFetch):
+        (commitInstantiated):
+        (requestFetch):
+        (requestSatisfy):
+        (setStateToMax): Deleted.
+        (newRegistryEntry): Deleted.
+        * runtime/ModuleLoaderPrototype.cpp:
+
+2016-12-14  Michael Saboff  
+
+        The stress GC bot crashes in JavaScriptCore beneath ShadowChicken::update and Inspector::jsToInspectorValue
+        https://bugs.webkit.org/show_bug.cgi?id=165871
+
+        Reviewed by Mark Lam.
+
+        This fixes two issues with the VM watchdog timer firing in a worker.
+
+        The first issue has to do with bytecode ordering.  Prior to this change, the first few opcodes
+        generated when the watchdog is enabled are:
+                op_enter
+                op_watchdog
+                op_get_scope
+        When the watchdog fires, the function will get an exception at op_watchdog.  In processing that exception,
+        we'll try to update the ShadowChicken shadow stack.  That update assumes that if there is a scope 
+        VirtualRegister allocated, then the slot contains a valid JSScope.  With the current bytecode ordering,
+        this is not true at op_watchdog as op_enter will put JSUndefined in the scope slot.  It isn't until the
+        op_get_scope gets processed that we'll have a valid scope in the slot.  The fix for this issue is to 
+        ensure that op_get_scope happens before the op_watchdog.
+
+        The second issue is that ScriptFunctionCall::call() will not tell its caller that a terminated
+        execution exception happened.  Instead call() returns an empty JSValue.  InjectedScript::wrapCallFrames()
+        wasn't checking for an empty JSValue, but was passing it to another function.  Added a short-circuit
+        return when call returns an empty JSValue.
+
+        Added  to fix other callers of ScriptFunctionCall::call()
+        to check for an empty JSValue return value.
+        Also tracked with .
+
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::BytecodeGenerator):
+        (JSC::BytecodeGenerator::emitEnter):
+        * inspector/InjectedScript.cpp:
+        (Inspector::InjectedScript::wrapCallFrames):
+
+2016-12-14  Filip Pizlo  
+
+        DirectTailCall implementation needs to tell the shuffler what to put into the ArgumentCount explicitly
+        https://bugs.webkit.org/show_bug.cgi?id=165882
+
+        Reviewed by Mark Lam.
+        
+        The CallFrameShuffler was assuming that the ArgumentCount that it should store into the
+        callee frame is simply the size of the args vector.
+        
+        That's not true for DirectTailCall, which will pad the args vector with undefined if we
+        are optimizing an arity mismatch. We need to pass the ArgumentCount explicitly in this
+        case.
+
+        * dfg/DFGSpeculativeJIT32_64.cpp:
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        * dfg/DFGSpeculativeJIT64.cpp:
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::compileDirectCallOrConstruct):
+        (JSC::FTL::DFG::LowerDFGToB3::compileTailCall):
+        * jit/CallFrameShuffleData.h:
+        * jit/CallFrameShuffler.cpp:
+        (JSC::CallFrameShuffler::CallFrameShuffler):
+        (JSC::CallFrameShuffler::prepareAny):
+        * jit/CallFrameShuffler.h:
+        (JSC::CallFrameShuffler::snapshot):
+        * jit/JITCall.cpp:
+        (JSC::JIT::compileOpCall):
+
+2016-12-14  Keith Miller  
+
+        WebAssembly JS API: implement Global
+        https://bugs.webkit.org/show_bug.cgi?id=164133
+
+        Reviewed by Saam Barati.
+
+        This patch adds support for globals. It handles imports, exports
+        and internal globals. In the MVP only internal globals are allowed
+        to be mutable. This means we can store a C-array of 64-bit slots
+        off the instance to hold them. When globals are exported to JS,
+        they are exported as numbers. This means that i64 globals cannot be
+        imported or exported.
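+
+        The storage scheme is roughly the following (a standalone sketch; GlobalSlots
+        and its methods are illustrative, not the JSWebAssemblyInstance layout):
+
+            #include <cstddef>
+            #include <cstdint>
+            #include <cstring>
+            #include <vector>
+
+            struct GlobalSlots {
+                std::vector<uint64_t> slots; // one 64-bit slot per global, whatever its type
+
+                uint32_t loadI32(size_t i) const { return static_cast<uint32_t>(slots[i]); }
+                double loadF64(size_t i) const { double d; std::memcpy(&d, &slots[i], sizeof(d)); return d; }
+                void storeI32(size_t i, uint32_t v) { slots[i] = v; }
+                void storeF64(size_t i, double d) { std::memcpy(&slots[i], &d, sizeof(d)); }
+            };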
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+        (JSC::Wasm::B3IRGenerator::getGlobal):
+        (JSC::Wasm::B3IRGenerator::setGlobal):
+        (JSC::Wasm::B3IRGenerator::addCallIndirect):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmFormat.h:
+        * wasm/WasmFunctionParser.h:
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseImport):
+        (JSC::Wasm::ModuleParser::parseGlobal):
+        (JSC::Wasm::ModuleParser::parseExport):
+        (JSC::Wasm::ModuleParser::parseElement):
+        (JSC::Wasm::ModuleParser::parseInitExpr):
+        (JSC::Wasm::ModuleParser::parseGlobalType):
+        (JSC::Wasm::ModuleParser::parseData):
+        * wasm/WasmModuleParser.h:
+        * wasm/WasmParser.h:
+        (JSC::Wasm::Parser::parseVarInt32):
+        (JSC::Wasm::Parser::parseVarInt64):
+        (JSC::Wasm::Parser::parseUInt64):
+        * wasm/WasmValidate.cpp:
+        (JSC::Wasm::Validate::hasMemory):
+        (JSC::Wasm::Validate::Validate):
+        (JSC::Wasm::Validate::getGlobal):
+        (JSC::Wasm::Validate::setGlobal):
+        (JSC::Wasm::validateFunction):
+        * wasm/generateWasmOpsHeader.py:
+        * wasm/js/JSWebAssemblyInstance.cpp:
+        (JSC::JSWebAssemblyInstance::create):
+        (JSC::JSWebAssemblyInstance::finishCreation):
+        (JSC::JSWebAssemblyInstance::visitChildren):
+        * wasm/js/JSWebAssemblyInstance.h:
+        (JSC::JSWebAssemblyInstance::loadI32Global):
+        (JSC::JSWebAssemblyInstance::loadI64Global):
+        (JSC::JSWebAssemblyInstance::loadF32Global):
+        (JSC::JSWebAssemblyInstance::loadF64Global):
+        (JSC::JSWebAssemblyInstance::setGlobal):
+        (JSC::JSWebAssemblyInstance::offsetOfGlobals):
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        (JSC::constructJSWebAssemblyInstance):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::finishCreation):
+        (JSC::WebAssemblyModuleRecord::link):
+
+2016-12-14  Filip Pizlo  
+
+        Unreviewed, re-enable concurrent GC on ARM64 now that the most likely culprit of the memory
+        regressions is fixed. Let's see what the bots think!
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+
+2016-12-14  Filip Pizlo  
+
+        Devices with fewer cores should use a more aggressive GC schedule by default
+        https://bugs.webkit.org/show_bug.cgi?id=165859
+
+        Reviewed by Mark Lam.
+
+        * heap/Heap.cpp:
+        (JSC::Heap::markToFixpoint): Log when we have an unexpected delay in wake-up.
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::drainInParallelPassively): Don't drain passively if there aren't many cores.
+        * runtime/Options.cpp:
+        (JSC::overrideDefaults): Change the heuristics if we have fewer cores.
+        (JSC::Options::initialize):
+        * runtime/Options.h:
+
+2016-12-14  Mark Lam  
+
+        BytecodeBasicBlock::computeImpl() should not keep iterating blocks if all jump targets have already been found.
+        https://bugs.webkit.org/show_bug.cgi?id=165820
+
+        Reviewed by Saam Barati.
+
+        Currently, if an opcode is a branch type opcode, BytecodeBasicBlock::computeImpl()
+        will iterate over all basic blocks looking for the block containing the jump
+        target, and it will continue to do this even when all the jump targets have been
+        found.  This is wasted work, and all the more so given that most branch type
+        opcodes only have a single jump target.
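+
+        The shape of the fix is roughly this (a standalone sketch, not the actual
+        BytecodeBasicBlock code):
+
+            #include <cstddef>
+            #include <utility>
+            #include <vector>
+
+            // Stop scanning blocks as soon as every jump target of the current opcode
+            // has been matched to its block.
+            void linkJumpTargets(const std::vector<size_t>& jumpTargets,
+                const std::vector<std::pair<size_t, size_t>>& blockRanges, // [begin, end) bytecode offsets
+                std::vector<size_t>& successorBlockIndices)
+            {
+                size_t remaining = jumpTargets.size();
+                for (size_t blockIndex = 0; blockIndex < blockRanges.size() && remaining; ++blockIndex) {
+                    for (size_t target : jumpTargets) {
+                        if (target >= blockRanges[blockIndex].first && target < blockRanges[blockIndex].second) {
+                            successorBlockIndices.push_back(blockIndex);
+                            --remaining; // once this hits zero, the outer loop stops
+                        }
+                    }
+                }
+            }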
+
+        * bytecode/BytecodeBasicBlock.cpp:
+        (JSC::BytecodeBasicBlock::computeImpl):
+
+2016-12-14  Gavin Barraclough  
+
+        MarkedBlock::marksConveyLivenessDuringMarking should take into account collection scope
+        https://bugs.webkit.org/show_bug.cgi?id=165741
+
+        Unreviewed, re-landing this with fix (revert erroneous change to Options).
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * heap/CellContainer.cpp: Added.
+        (JSC::CellContainer::isNewlyAllocated):
+        * heap/CellContainer.h:
+        * heap/MarkedAllocator.cpp:
+        (JSC::MarkedAllocator::addBlock):
+        (JSC::MarkedAllocator::removeBlock):
+        (JSC::MarkedAllocator::dumpBits):
+        * heap/MarkedAllocator.h:
+        (JSC::MarkedAllocator::forEachBitVector):
+        (JSC::MarkedAllocator::forEachBitVectorWithName):
+        * heap/MarkedBlock.cpp:
+        (JSC::MarkedBlock::tryCreate):
+        (JSC::MarkedBlock::Handle::~Handle):
+        (JSC::MarkedBlock::MarkedBlock):
+        (JSC::MarkedBlock::Handle::specializedSweep):
+        (JSC::MarkedBlock::Handle::sweepHelperSelectMarksMode):
+        (JSC::MarkedBlock::Handle::stopAllocating):
+        (JSC::MarkedBlock::Handle::resumeAllocating):
+        (JSC::MarkedBlock::aboutToMarkSlow):
+        (JSC::MarkedBlock::Handle::didConsumeFreeList):
+        (JSC::MarkedBlock::Handle::dumpState):
+        * heap/MarkedBlock.h:
+        (JSC::MarkedBlock::markingVersion):
+        (JSC::MarkedBlock::isMarkedRaw):
+        (JSC::MarkedBlock::isMarked):
+        * heap/MarkedBlockInlines.h:
+        (JSC::MarkedBlock::marksConveyLivenessDuringMarking):
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::appendJSCellOrAuxiliary):
+        * runtime/StructureIDTable.h:
+        (JSC::StructureIDTable::size):
+        (JSC::StructureIDTable::get):
+
+2016-12-14  Chris Dumez  
+
+        Unreviewed, rolling out r209766.
+
+        Regressed Dromaeo JSLib by ~50%
+
+        Reverted changeset:
+
+        "Make opaque root scanning truly constraint-based"
+        https://bugs.webkit.org/show_bug.cgi?id=165760
+        http://trac.webkit.org/changeset/209766
+
+2016-12-14  Commit Queue  
+
+        Unreviewed, rolling out r209795.
+        https://bugs.webkit.org/show_bug.cgi?id=165853
+
+        rolled out the wrong revision (Requested by pizlo on #webkit).
+
+        Reverted changeset:
+
+        "MarkedBlock::marksConveyLivenessDuringMarking should take
+        into account collection scope"
+        https://bugs.webkit.org/show_bug.cgi?id=165741
+        http://trac.webkit.org/changeset/209795
+
+2016-12-14  Filip Pizlo  
+
+        Unreviewed, disable concurrent GC on ARM while we investigate a memory use regression.
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+
+2016-12-13  Yusuke Suzuki  
+
+        Use JSValue::toWTFString instead of calling toString(exec) and value(exec)
+        https://bugs.webkit.org/show_bug.cgi?id=165795
+
+        Reviewed by Saam Barati.
+
+        In the old days, we frequently used an idiom like `value.toString(exec)->value(exec)` to
+        get a WTFString from a given JSValue. But now we have a better function, `toWTFString`.
+        `toWTFString` does not create intermediate JSString objects, which reduces unnecessary
+        allocations.
+
+        This patch mechanically replaces `value.toString(exec)->value(exec)` with `toWTFString(exec)`.
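+
+        In other words (illustrative only, not one of the listed call sites):
+
+            String oldWay = value.toString(exec)->value(exec); // allocates a JSString just to read it back
+            String newWay = value.toWTFString(exec);           // goes straight to the WTF::String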
+
+        * API/JSValueRef.cpp:
+        (JSValueToStringCopy):
+        * bindings/ScriptValue.cpp:
+        (Deprecated::ScriptValue::toString):
+        * inspector/JSGlobalObjectInspectorController.cpp:
+        (Inspector::JSGlobalObjectInspectorController::reportAPIException):
+        * inspector/JSInjectedScriptHost.cpp:
+        (Inspector::JSInjectedScriptHost::evaluateWithScopeExtension):
+        * inspector/JSJavaScriptCallFrame.cpp:
+        (Inspector::JSJavaScriptCallFrame::evaluateWithScopeExtension):
+        * inspector/ScriptCallStackFactory.cpp:
+        (Inspector::extractSourceInformationFromException):
+        * runtime/ConsoleObject.cpp:
+        (JSC::valueToStringWithUndefinedOrNullCheck):
+        (JSC::valueOrDefaultLabelString):
+        * runtime/DateConstructor.cpp:
+        (JSC::dateParse):
+        * runtime/DatePrototype.cpp:
+        (JSC::formatLocaleDate):
+        * runtime/ErrorInstance.cpp:
+        (JSC::ErrorInstance::sanitizedToString):
+        * runtime/ErrorPrototype.cpp:
+        (JSC::errorProtoFuncToString):
+        * runtime/InspectorInstrumentationObject.cpp:
+        (JSC::inspectorInstrumentationObjectLog):
+        * runtime/JSGlobalObjectFunctions.cpp:
+        (JSC::globalFuncEval):
+        * runtime/JSModuleLoader.cpp:
+        (JSC::JSModuleLoader::fetch):
+        * runtime/ModuleLoaderPrototype.cpp:
+        (JSC::moduleLoaderPrototypeParseModule):
+        * runtime/RegExpConstructor.cpp:
+        (JSC::regExpCreate):
+        * runtime/RegExpPrototype.cpp:
+        (JSC::regExpProtoFuncCompile):
+        (JSC::regExpProtoFuncToString):
+        * runtime/StringPrototype.cpp:
+        (JSC::replaceUsingRegExpSearch):
+        (JSC::replaceUsingStringSearch):
+        (JSC::stringProtoFuncSlice):
+        (JSC::stringProtoFuncSplitFast):
+        (JSC::stringProtoFuncSubstr):
+        (JSC::stringProtoFuncLocaleCompare):
+        (JSC::stringProtoFuncBig):
+        (JSC::stringProtoFuncSmall):
+        (JSC::stringProtoFuncBlink):
+        (JSC::stringProtoFuncBold):
+        (JSC::stringProtoFuncFixed):
+        (JSC::stringProtoFuncItalics):
+        (JSC::stringProtoFuncStrike):
+        (JSC::stringProtoFuncSub):
+        (JSC::stringProtoFuncSup):
+        (JSC::stringProtoFuncFontcolor):
+        (JSC::stringProtoFuncFontsize):
+        (JSC::stringProtoFuncAnchor):
+        (JSC::stringProtoFuncLink):
+        (JSC::trimString):
+        (JSC::stringProtoFuncStartsWith):
+        (JSC::stringProtoFuncEndsWith):
+        (JSC::stringProtoFuncIncludes):
+        (JSC::builtinStringIncludesInternal):
+        (JSC::stringProtoFuncNormalize):
+        * tools/JSDollarVMPrototype.cpp:
+        (JSC::functionPrint):
+        * wasm/js/JSWebAssemblyCompileError.h:
+        (JSC::JSWebAssemblyCompileError::create):
+        * wasm/js/JSWebAssemblyRuntimeError.h:
+        (JSC::JSWebAssemblyRuntimeError::create):
+
+2016-12-14  Gavin Barraclough  
+
+        MarkedBlock::marksConveyLivenessDuringMarking should take into account collection scope
+        https://bugs.webkit.org/show_bug.cgi?id=165741
+
+        Unreviewed rollout due to performance regression.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * heap/CellContainer.cpp: Removed.
+        * heap/CellContainer.h:
+        * heap/MarkedAllocator.cpp:
+        (JSC::MarkedAllocator::addBlock):
+        (JSC::MarkedAllocator::removeBlock):
+        (JSC::MarkedAllocator::dumpBits):
+        * heap/MarkedAllocator.h:
+        (JSC::MarkedAllocator::forEachBitVector):
+        (JSC::MarkedAllocator::forEachBitVectorWithName):
+        * heap/MarkedBlock.cpp:
+        (JSC::MarkedBlock::tryCreate):
+        (JSC::MarkedBlock::Handle::~Handle):
+        (JSC::MarkedBlock::MarkedBlock):
+        (JSC::MarkedBlock::Handle::specializedSweep):
+        (JSC::MarkedBlock::Handle::sweepHelperSelectMarksMode):
+        (JSC::MarkedBlock::Handle::stopAllocating):
+        (JSC::MarkedBlock::Handle::resumeAllocating):
+        (JSC::MarkedBlock::aboutToMarkSlow):
+        (JSC::MarkedBlock::Handle::didConsumeFreeList):
+        (JSC::MarkedBlock::Handle::dumpState): Deleted.
+        * heap/MarkedBlock.h:
+        (JSC::MarkedBlock::isMarked):
+        (JSC::MarkedBlock::markingVersion): Deleted.
+        (JSC::MarkedBlock::isMarkedRaw): Deleted.
+        * heap/MarkedBlockInlines.h:
+        (JSC::MarkedBlock::marksConveyLivenessDuringMarking):
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::appendJSCellOrAuxiliary):
+        * runtime/Options.h:
+        * runtime/StructureIDTable.h:
+        (JSC::StructureIDTable::get):
+        (JSC::StructureIDTable::size): Deleted.
+
+2016-12-13  Commit Queue  
+
+        Unreviewed, rolling out r209792.
+        https://bugs.webkit.org/show_bug.cgi?id=165841
+
+        Cause build failures (Requested by yusukesuzuki on #webkit).
+
+        Reverted changeset:
+
+        "Use JSValue::toWTFString instead of calling toString(exec)
+        and value(exec)"
+        https://bugs.webkit.org/show_bug.cgi?id=165795
+        http://trac.webkit.org/changeset/209792
+
+2016-12-13  Yusuke Suzuki  
+
+        Use JSValue::toWTFString instead of calling toString(exec) and value(exec)
+        https://bugs.webkit.org/show_bug.cgi?id=165795
+
+        Reviewed by Saam Barati.
+
+        In the old days, we frequently used an idiom like `value.toString(exec)->value(exec)` to
+        get a WTFString from a given JSValue. But now we have a better function, `toWTFString`.
+        `toWTFString` does not create intermediate JSString objects, which reduces unnecessary
+        allocations.
+
+        This patch mechanically replaces `value.toString(exec)->value(exec)` with `toWTFString(exec)`.
+
+        * API/JSValueRef.cpp:
+        (JSValueToStringCopy):
+        * bindings/ScriptValue.cpp:
+        (Deprecated::ScriptValue::toString):
+        * inspector/JSGlobalObjectInspectorController.cpp:
+        (Inspector::JSGlobalObjectInspectorController::reportAPIException):
+        * inspector/JSInjectedScriptHost.cpp:
+        (Inspector::JSInjectedScriptHost::evaluateWithScopeExtension):
+        * inspector/JSJavaScriptCallFrame.cpp:
+        (Inspector::JSJavaScriptCallFrame::evaluateWithScopeExtension):
+        * inspector/ScriptCallStackFactory.cpp:
+        (Inspector::extractSourceInformationFromException):
+        * runtime/ConsoleObject.cpp:
+        (JSC::valueToStringWithUndefinedOrNullCheck):
+        (JSC::valueOrDefaultLabelString):
+        * runtime/DateConstructor.cpp:
+        (JSC::dateParse):
+        * runtime/DatePrototype.cpp:
+        (JSC::formatLocaleDate):
+        * runtime/ErrorInstance.cpp:
+        (JSC::ErrorInstance::sanitizedToString):
+        * runtime/ErrorPrototype.cpp:
+        (JSC::errorProtoFuncToString):
+        * runtime/InspectorInstrumentationObject.cpp:
+        (JSC::inspectorInstrumentationObjectLog):
+        * runtime/JSCJSValue.cpp:
+        (JSC::JSValue::toWTFStringSlowCase):
+        * runtime/JSGlobalObjectFunctions.cpp:
+        (JSC::globalFuncEval):
+        * runtime/JSModuleLoader.cpp:
+        (JSC::JSModuleLoader::fetch):
+        * runtime/ModuleLoaderPrototype.cpp:
+        (JSC::moduleLoaderPrototypeParseModule):
+        * runtime/RegExpConstructor.cpp:
+        (JSC::regExpCreate):
+        * runtime/RegExpPrototype.cpp:
+        (JSC::regExpProtoFuncCompile):
+        (JSC::regExpProtoFuncToString):
+        * runtime/StringPrototype.cpp:
+        (JSC::replaceUsingRegExpSearch):
+        (JSC::replaceUsingStringSearch):
+        (JSC::stringProtoFuncSlice):
+        (JSC::stringProtoFuncSplitFast):
+        (JSC::stringProtoFuncSubstr):
+        (JSC::stringProtoFuncLocaleCompare):
+        (JSC::stringProtoFuncBig):
+        (JSC::stringProtoFuncSmall):
+        (JSC::stringProtoFuncBlink):
+        (JSC::stringProtoFuncBold):
+        (JSC::stringProtoFuncFixed):
+        (JSC::stringProtoFuncItalics):
+        (JSC::stringProtoFuncStrike):
+        (JSC::stringProtoFuncSub):
+        (JSC::stringProtoFuncSup):
+        (JSC::stringProtoFuncFontcolor):
+        (JSC::stringProtoFuncFontsize):
+        (JSC::stringProtoFuncAnchor):
+        (JSC::stringProtoFuncLink):
+        (JSC::trimString):
+        (JSC::stringProtoFuncStartsWith):
+        (JSC::stringProtoFuncEndsWith):
+        (JSC::stringProtoFuncIncludes):
+        (JSC::builtinStringIncludesInternal):
+        (JSC::stringProtoFuncNormalize):
+        * tools/JSDollarVMPrototype.cpp:
+        (JSC::functionPrint):
+        * wasm/js/JSWebAssemblyCompileError.h:
+        (JSC::JSWebAssemblyCompileError::create):
+        * wasm/js/JSWebAssemblyRuntimeError.h:
+        (JSC::JSWebAssemblyRuntimeError::create):
+
+2016-12-13  Saam Barati  
+
+        WebAssembly: implement the elements section
+        https://bugs.webkit.org/show_bug.cgi?id=165715
+
+        Reviewed by Keith Miller.
+
+        This is a straightforward implementation of the Element
+        section in the Wasm spec:
+        https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#element-section
+        
+        There are a few ambiguities I encountered when implementing this, so I've
+        filed bugs against the Wasm design repo, and corresponding bugzilla bugs
+        for us to address after they've been discussed by the various Wasm folks:
+        - https://bugs.webkit.org/show_bug.cgi?id=165827
+        - https://bugs.webkit.org/show_bug.cgi?id=165826
+        - https://bugs.webkit.org/show_bug.cgi?id=165825
+
+        * wasm/WasmFormat.h:
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseElement):
+        (JSC::Wasm::ModuleParser::parseInitExpr):
+        (JSC::Wasm::ModuleParser::parseData):
+        * wasm/WasmModuleParser.h:
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::evaluate):
+
+2016-12-13  Chris Dumez  
+
+        Unreviewed, rolling out r209544.
+
+        Looks like r209489 did not cause the performance regression
+        after all
+
+        Reverted changeset:
+
+        "Unreviewed, rolling out r209489."
+        https://bugs.webkit.org/show_bug.cgi?id=165550
+        http://trac.webkit.org/changeset/209544
+
+2016-12-13  Saam Barati  
+
+        WebAssembly: implement the table section and table import
+        https://bugs.webkit.org/show_bug.cgi?id=165716
+
+        Reviewed by Keith Miller.
+
+        This patch implements the Table space for wasm:
+        https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#table-section
+
+        It only implements defining and importing a table. The bulk
+        of this patch is implementing the various wasm Table prototype
+        methods and the underlying Table object:
+        https://github.com/WebAssembly/design/blob/master/JS.md#webassemblytable-constructor
+
+        This patch also fixes a bug in our implementation with call_indirect.
+        We initially implemented call_indirect as a way to call functions that
+        are imported or defined in the module. This was the wrong
+        interpretation of the spec. Instead, call_indirect can only index into
+        the table index space.
+
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+        (JSC::Wasm::B3IRGenerator::addCallIndirect):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmFormat.h:
+        (JSC::Wasm::TableInformation::TableInformation):
+        (JSC::Wasm::TableInformation::operator bool):
+        (JSC::Wasm::TableInformation::isImport):
+        (JSC::Wasm::TableInformation::initial):
+        (JSC::Wasm::TableInformation::maximum):
+        (JSC::Wasm::CallableFunction::CallableFunction):
+        * wasm/WasmFunctionParser.h:
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseImport):
+        (JSC::Wasm::ModuleParser::parseResizableLimits):
+        (JSC::Wasm::ModuleParser::parseTableHelper):
+        (JSC::Wasm::ModuleParser::parseTable):
+        (JSC::Wasm::ModuleParser::parseMemoryHelper):
+        (JSC::Wasm::ModuleParser::parseExport):
+        * wasm/WasmModuleParser.h:
+        * wasm/js/JSWebAssemblyHelpers.h: Added.
+        (JSC::toNonWrappingUint32):
+        * wasm/js/JSWebAssemblyInstance.cpp:
+        (JSC::JSWebAssemblyInstance::visitChildren):
+        * wasm/js/JSWebAssemblyInstance.h:
+        (JSC::JSWebAssemblyInstance::table):
+        (JSC::JSWebAssemblyInstance::setTable):
+        (JSC::JSWebAssemblyInstance::offsetOfTable):
+        * wasm/js/JSWebAssemblyTable.cpp:
+        (JSC::JSWebAssemblyTable::create):
+        (JSC::JSWebAssemblyTable::JSWebAssemblyTable):
+        (JSC::JSWebAssemblyTable::visitChildren):
+        (JSC::JSWebAssemblyTable::grow):
+        (JSC::JSWebAssemblyTable::clearFunction):
+        (JSC::JSWebAssemblyTable::setFunction):
+        * wasm/js/JSWebAssemblyTable.h:
+        (JSC::JSWebAssemblyTable::maximum):
+        (JSC::JSWebAssemblyTable::size):
+        (JSC::JSWebAssemblyTable::getFunction):
+        (JSC::JSWebAssemblyTable::offsetOfSize):
+        (JSC::JSWebAssemblyTable::offsetOfFunctions):
+        (JSC::JSWebAssemblyTable::isValidSize):
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::WebAssemblyFunction::call):
+        (JSC::WebAssemblyFunction::create):
+        (JSC::WebAssemblyFunction::visitChildren):
+        (JSC::WebAssemblyFunction::finishCreation):
+        * wasm/js/WebAssemblyFunction.h:
+        (JSC::WebAssemblyFunction::signature):
+        (JSC::WebAssemblyFunction::wasmEntrypoint):
+        (JSC::WebAssemblyFunction::webAssemblyCallee): Deleted.
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        (JSC::constructJSWebAssemblyInstance):
+        * wasm/js/WebAssemblyMemoryConstructor.cpp:
+        (JSC::constructJSWebAssemblyMemory):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::finishCreation):
+        (JSC::WebAssemblyModuleRecord::link):
+        * wasm/js/WebAssemblyTableConstructor.cpp:
+        (JSC::constructJSWebAssemblyTable):
+        * wasm/js/WebAssemblyTablePrototype.cpp:
+        (JSC::getTable):
+        (JSC::webAssemblyTableProtoFuncLength):
+        (JSC::webAssemblyTableProtoFuncGrow):
+        (JSC::webAssemblyTableProtoFuncGet):
+        (JSC::webAssemblyTableProtoFuncSet):
+        (JSC::WebAssemblyTablePrototype::create):
+        (JSC::WebAssemblyTablePrototype::finishCreation):
+        * wasm/js/WebAssemblyTablePrototype.h:
+
+2016-12-13  Filip Pizlo  
+
+        Add null checks to opaque root APIs.
+
+        Rubber stamped by Saam Barati. 
+
+        If we got a crash report about null in the opaque root HashSet, we would probably not
+        celebrate how great it is that we found out about a new race - instead we would probably
+        be annoyed that null wasn't just silently ignored.
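+
+        A standalone sketch of the intended behavior (std::unordered_set standing in
+        for the real opaque-root HashSet):
+
+            #include <unordered_set>
+
+            struct OpaqueRoots {
+                std::unordered_set<void*> roots;
+
+                void add(void* root)
+                {
+                    if (!root)
+                        return; // silently ignore null rather than tripping over it later
+                    roots.insert(root);
+                }
+
+                bool contains(void* root) const { return root && roots.count(root); }
+            };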
+
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::addOpaqueRoot):
+        (JSC::SlotVisitor::containsOpaqueRoot):
+        (JSC::SlotVisitor::containsOpaqueRootTriState):
+
+2016-12-13  Filip Pizlo  
+
+        Make opaque root scanning truly constraint-based
+        https://bugs.webkit.org/show_bug.cgi?id=165760
+
+        Reviewed by Saam Barati.
+        
+        We have bugs when visitChildren() changes its mind about what opaque root to add, since
+        we don't have barriers on opaque roots. This supposedly once worked for generational GC,
+        and I started adding more barriers to support concurrent GC. But I think that the real
+        bug here is that we want the JSObject->OpaqueRoot relationship to be evaluated as a constraint that
+        participates in the fixpoint. A constraint is different from the normal visiting in that
+        the GC will not wait for a barrier to rescan the object.
+        
+        So, it's now possible for any visitChildren() method to become a constraint by calling
+        slotVisitor.rescanAsConstraint(). Because opaque roots are constraints, addOpaqueRoot()
+        does rescanAsConstraint() for you.
+        
+        The constraint set is simply a HashSet that accumulates with every
+        rescanAsConstraint() call and is only cleared at the start of full GC. This trivially
+        resolves most classes of GC bugs that would have arisen from opaque roots being changed
+        in a way that the GC did not anticipate.
+        
+        Looks like this is perf-neutral.
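+
+        Conceptually, the constraint set behaves like this (a standalone sketch; the
+        real logic lives in SlotVisitor and Heap):
+
+            #include <unordered_set>
+
+            struct ConstraintSet {
+                std::unordered_set<void*> cells;
+
+                void rescanAsConstraint(void* cell) { cells.insert(cell); } // accumulates until the next full GC
+                void clearAtFullGCStart() { cells.clear(); }
+
+                template<typename VisitFunction>
+                void executeAtEachFixpointIteration(const VisitFunction& visitCell)
+                {
+                    for (void* cell : cells)
+                        visitCell(cell); // re-run visitChildren() so opaque-root changes are observed
+                }
+            };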
+        
+        * heap/Heap.cpp:
+        (JSC::Heap::markToFixpoint):
+        (JSC::Heap::setMutatorShouldBeFenced):
+        (JSC::Heap::writeBarrierOpaqueRootSlow): Deleted.
+        (JSC::Heap::addMutatorShouldBeFencedCache): Deleted.
+        * heap/Heap.h:
+        * heap/HeapInlines.h:
+        (JSC::Heap::writeBarrierOpaqueRoot): Deleted.
+        * heap/MarkedSpace.cpp:
+        (JSC::MarkedSpace::visitWeakSets):
+        * heap/MarkedSpace.h:
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::visitChildren):
+        (JSC::SlotVisitor::visitSubsequently):
+        (JSC::SlotVisitor::drain):
+        (JSC::SlotVisitor::addOpaqueRoot):
+        (JSC::SlotVisitor::rescanAsConstraint):
+        (JSC::SlotVisitor::mergeIfNecessary):
+        (JSC::SlotVisitor::mergeOpaqueRootsAndConstraints):
+        (JSC::SlotVisitor::mergeOpaqueRootsIfNecessary): Deleted.
+        * heap/SlotVisitor.h:
+        * heap/SlotVisitorInlines.h:
+        (JSC::SlotVisitor::reportExtraMemoryVisited):
+        (JSC::SlotVisitor::reportExternalMemoryVisited):
+        (JSC::SlotVisitor::didNotRace):
+        * heap/WeakBlock.cpp:
+        (JSC::WeakBlock::specializedVisit):
+        (JSC::WeakBlock::visit):
+        * heap/WeakBlock.h:
+        * heap/WeakSet.h:
+        (JSC::WeakSet::visit):
+
+2016-12-13  Commit Queue  
+
+        Unreviewed, rolling out r209725.
+        https://bugs.webkit.org/show_bug.cgi?id=165811
+
+        "Broke ARMv7 builds" (Requested by msaboff on #webkit).
+
+        Reverted changeset:
+
+        "REGRESSION(r209653): speedometer crashes making virtual slow
+        path tailcalls"
+        https://bugs.webkit.org/show_bug.cgi?id=165748
+        http://trac.webkit.org/changeset/209725
+
+2016-12-13  Filip Pizlo  
+
+        Unreviewed, revert the collectorPermittedIdleRatio back to 0 because of 100MB
+        regression on membuster. Also, it didn't seem to help perf.
+
+        * runtime/Options.h:
+
+2016-12-13  JF Bastien  
+
+        [WTF] Turn tryMakeString(), makeString() into variadic templates
+        https://bugs.webkit.org/show_bug.cgi?id=147142
+
+        Reviewed by Mark Lam.
+
+        * runtime/JSStringBuilder.h:
+        (JSC::jsMakeNontrivialString): remove WTF:: prefix, it isn't needed anymore
+        * runtime/Lookup.cpp:
+        (JSC::reifyStaticAccessor): remove WTF:: prefix, it isn't needed anymore
+        * runtime/ObjectPrototype.cpp:
+        (JSC::objectProtoFuncToString): remove WTF:: prefix, it isn't needed anymore
+
+2016-12-12  Mark Lam  
+
+        Rename BytecodeGenerator's ControlFlowContext to ControlFlowScope.
+        https://bugs.webkit.org/show_bug.cgi?id=165777
+
+        Reviewed by Keith Miller.
+
+        The existing code sometimes refers to ControlFlowContext (and associated references)
+        as context, and sometimes as scope.  Let's be consistent and always call it a scope.
+
+        Also renamed push/popScopedControlFlowContext() to push/popLocalControlFlowScope()
+        because these are only used when we inc/dec the m_localScopeDepth.
+
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::initializeVarLexicalEnvironment):
+        (JSC::BytecodeGenerator::pushLexicalScopeInternal):
+        (JSC::BytecodeGenerator::popLexicalScopeInternal):
+        (JSC::BytecodeGenerator::emitPushWithScope):
+        (JSC::BytecodeGenerator::emitPopWithScope):
+        (JSC::BytecodeGenerator::pushFinallyControlFlowScope):
+        (JSC::BytecodeGenerator::pushIteratorCloseControlFlowScope):
+        (JSC::BytecodeGenerator::popFinallyControlFlowScope):
+        (JSC::BytecodeGenerator::popIteratorCloseControlFlowScope):
+        (JSC::BytecodeGenerator::emitComplexPopScopes):
+        (JSC::BytecodeGenerator::emitPopScopes):
+        (JSC::BytecodeGenerator::pushLocalControlFlowScope):
+        (JSC::BytecodeGenerator::popLocalControlFlowScope):
+        (JSC::BytecodeGenerator::emitEnumeration):
+        (JSC::BytecodeGenerator::pushFinallyContext): Deleted.
+        (JSC::BytecodeGenerator::pushIteratorCloseContext): Deleted.
+        (JSC::BytecodeGenerator::popFinallyContext): Deleted.
+        (JSC::BytecodeGenerator::popIteratorCloseContext): Deleted.
+        (JSC::BytecodeGenerator::pushScopedControlFlowContext): Deleted.
+        (JSC::BytecodeGenerator::popScopedControlFlowContext): Deleted.
+        * bytecompiler/BytecodeGenerator.h:
+        * bytecompiler/NodesCodegen.cpp:
+        (JSC::TryNode::emitBytecode):
+
+2016-12-12  Filip Pizlo  
+
+        GC scheduler should avoid consecutive pauses
+        https://bugs.webkit.org/show_bug.cgi?id=165758
+
+        Reviewed by Michael Saboff.
+        
+        This factors out the scheduler from lambdas in Heap::markToFixpoint to an actual class.
+        It's called the SpaceTimeScheduler because it is a linear controller that ties the
+        amount of time you spend on things to the amount of space you are using.
+        
+        This patch uses this refactoring to fix a bug where the GC would pause even though we
+        still had time during a mutator timeslice. This is a 15% improvement on
+        JetStream/splay-latency. Seems neutral on everything else. However, it's not at all
+        clear if this is the right policy or not since retreating wavefront can sometimes be so
+        sensitive to scheduling decisions. For this reason, there is a tunable option that lets
+        you decide how long the GC will sit idle before the start of its timeslice.
+        
+        So, we can revert this policy change without reverting the rest of the patch.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * heap/Heap.cpp:
+        (JSC::Heap::markToFixpoint):
+        * heap/Heap.h:
+        * heap/SpaceTimeScheduler.cpp: Added.
+        (JSC::SpaceTimeScheduler::Decision::targetMutatorUtilization):
+        (JSC::SpaceTimeScheduler::Decision::targetCollectorUtilization):
+        (JSC::SpaceTimeScheduler::Decision::elapsedInPeriod):
+        (JSC::SpaceTimeScheduler::Decision::phase):
+        (JSC::SpaceTimeScheduler::Decision::shouldBeResumed):
+        (JSC::SpaceTimeScheduler::Decision::timeToResume):
+        (JSC::SpaceTimeScheduler::Decision::timeToStop):
+        (JSC::SpaceTimeScheduler::SpaceTimeScheduler):
+        (JSC::SpaceTimeScheduler::snapPhase):
+        (JSC::SpaceTimeScheduler::currentDecision):
+        * heap/SpaceTimeScheduler.h: Added.
+        (JSC::SpaceTimeScheduler::Decision::Decision):
+        (JSC::SpaceTimeScheduler::Decision::operator bool):
+        * runtime/Options.h:
+
+2016-12-12  Michael Saboff  
+
+        REGRESSION(r209653): speedometer crashes making virtual slow path tailcalls
+        https://bugs.webkit.org/show_bug.cgi?id=165748
+
+        Reviewed by Filip Pizlo.
+
+        The virtual slow path for tailcalls always passes arguments on the stack.
+        The fix here is to link to the stack argument entrypoint instead of a register
+        argument entrypoint.
+
+        While fixing this bug, I found that we weren't clearing the code origin when
+        shuffling the call frame for a register argument tailcall.
+
+        Also rolling back in r209653, r209654, r209663, and r209673.
+
+        * jit/CallFrameShuffler.cpp:
+        (JSC::CallFrameShuffler::prepareAny):
+        * jit/ThunkGenerators.cpp:
+        (JSC::virtualThunkFor):
+
+2016-12-12  Mark Lam  
+
+        Rename BytecodeGenerator's m_symbolTableStack to m_lexicalScopeStack.
+        https://bugs.webkit.org/show_bug.cgi?id=165768
+
+        Reviewed by Saam Barati.
+
+        The lexical scope in "m_lexicalScopeStack" here refers to a pair of { } in the
+        source code that bounds the scope of variables.
+
+        There are 4 places in the code where we call m_symbolTableStack.append() to
+        append a new stack entry.  In only 3 of the 4 cases, a symbol table is provided
+        in the new stack entry.  In all 4 cases, a scope register is provided in the new
+        stack entry.
+
+        Also, 3 of the 4 functions that appends an entry to this stack are named:
+        1. initializeVarLexicalEnvironment()
+        2. pushLexicalScopeInternal()
+        3. emitPushWithScope()
+
+        The 4th function is the BytecodeGenerator constructor where it pushes the scope
+        for a module environment.
+
+        Based on these details, m_lexicalScopeStack is a better name for this stack than
+        m_symbolTableStack.
+
+        * bytecompiler/BytecodeGenerator.cpp:
+        (JSC::BytecodeGenerator::BytecodeGenerator):
+        (JSC::BytecodeGenerator::initializeArrowFunctionContextScopeIfNeeded):
+        (JSC::BytecodeGenerator::initializeVarLexicalEnvironment):
+        (JSC::BytecodeGenerator::pushLexicalScopeInternal):
+        (JSC::BytecodeGenerator::initializeBlockScopedFunctions):
+        (JSC::BytecodeGenerator::hoistSloppyModeFunctionIfNecessary):
+        (JSC::BytecodeGenerator::popLexicalScopeInternal):
+        (JSC::BytecodeGenerator::prepareLexicalScopeForNextForLoopIteration):
+        (JSC::BytecodeGenerator::variable):
+        (JSC::BytecodeGenerator::resolveType):
+        (JSC::BytecodeGenerator::emitResolveScope):
+        (JSC::BytecodeGenerator::emitPushWithScope):
+        (JSC::BytecodeGenerator::emitPopWithScope):
+        (JSC::BytecodeGenerator::pushFinallyContext):
+        (JSC::BytecodeGenerator::pushIteratorCloseContext):
+        (JSC::BytecodeGenerator::emitComplexPopScopes):
+        (JSC::BytecodeGenerator::popTryAndEmitCatch):
+        (JSC::BytecodeGenerator::emitPushFunctionNameScope):
+        * bytecompiler/BytecodeGenerator.h:
+
+2016-12-12  Saam Barati  
+
+        Unreviewed. Try to fix the cloop build.
+
+        * interpreter/StackVisitor.cpp:
+        (JSC::StackVisitor::Frame::calleeSaveRegisters):
+        * interpreter/StackVisitor.h:
+
+2016-12-12  Michael Saboff  
+
+        FTL: Dumping disassembly requires that code origin is set when making polymorphic tail calls.
+        https://bugs.webkit.org/show_bug.cgi?id=165747
+
+        Reviewed by Filip Pizlo.
+
+        Setting the code origin needs to be done for both the fast and slow path as we might need
+        it when linking a polymorphic or virtual call stub.
+
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::compileTailCall):
+
+2016-12-11  Saam Barati  
+
+        Unreviewed. Try to fix the linux build.
+
+        * runtime/StackFrame.h:
+
+2016-12-11  Saam Barati  
+
+        We should be able to throw exceptions from Wasm code and when Wasm frames are on the stack
+        https://bugs.webkit.org/show_bug.cgi?id=165429
+
+        Reviewed by Keith Miller.
+
+        This patch teaches the stack walking runtime about wasm.
+        To do this, I taught StackVisitor that a callee is not
+        always an object.
+
+        To be able to unwind callee save registers properly, I've given
+        JSWebAssemblyCallee a list of RegisterAtOffsetList for the callee
+        saves that B3 saved in the prologue. Also, because we have two
+        B3Compilations per wasm function, one for wasm entrypoint, and
+        one for the JS entrypoint, I needed to create a callee for each
+        because they each might spill callee save registers.
+
+        I also fixed a bug inside the Wasm::Memory constructor where we
+        were trying to mmap the same number of bytes even after the first
+        mmap failed. We should start by trying to mmap the maximum bytes,
+        and if that fails, fall back to the specified initial bytes. However,
+        the code was just mmapping the maximum twice. I've fixed that and
+        also added a RELEASE_ASSERT_NOT_REACHED() for when the second mmap
+        fails along with a FIXME to throw an OOM error.
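+
+        A minimal sketch of the intended fallback (not the actual Wasm::Memory code;
+        the function name and flags here are illustrative):
+
+            #include <sys/mman.h>
+            #include <cassert>
+            #include <cstddef>
+
+            // Try to reserve the maximum first; only if that fails, fall back to the
+            // initial size. The bug was passing the maximum to both mmap calls.
+            void* reserveWasmMemory(size_t initialBytes, size_t maximumBytes)
+            {
+                void* memory = mmap(nullptr, maximumBytes, PROT_READ | PROT_WRITE,
+                    MAP_PRIVATE | MAP_ANON, -1, 0);
+                if (memory != MAP_FAILED)
+                    return memory;
+                memory = mmap(nullptr, initialBytes, PROT_READ | PROT_WRITE,
+                    MAP_PRIVATE | MAP_ANON, -1, 0);
+                assert(memory != MAP_FAILED); // FIXME in the patch: throw an OOM error instead
+                return memory;
+            }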
+
+        There was a second bug I fixed where JSModuleRecord was calling
+        visitWeak on its CallLinkInfos inside ::visitChildren(). It needs
+        to do this after marking. I changed JSModuleRecord to do what
+        CodeBlock does and call visitWeak on its CallLinkInfos inside
+        an UnconditionalFinalizer.
+
+        * API/JSContextRef.cpp:
+        (BacktraceFunctor::operator()):
+        * inspector/ScriptCallStackFactory.cpp:
+        (Inspector::createScriptCallStackFromException):
+        * interpreter/CallFrame.cpp:
+        (JSC::CallFrame::vmEntryGlobalObject):
+        * interpreter/CallFrame.h:
+        (JSC::ExecState::callee):
+        * interpreter/Interpreter.cpp:
+        (JSC::GetStackTraceFunctor::operator()):
+        (JSC::UnwindFunctor::operator()):
+        (JSC::UnwindFunctor::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer):
+        * interpreter/Interpreter.h:
+        * interpreter/ShadowChicken.cpp:
+        (JSC::ShadowChicken::update):
+        * interpreter/StackVisitor.cpp:
+        (JSC::StackVisitor::StackVisitor):
+        (JSC::StackVisitor::readFrame):
+        (JSC::StackVisitor::readNonInlinedFrame):
+        (JSC::StackVisitor::readInlinedFrame):
+        (JSC::StackVisitor::Frame::isWasmFrame):
+        (JSC::StackVisitor::Frame::codeType):
+        (JSC::StackVisitor::Frame::calleeSaveRegisters):
+        (JSC::StackVisitor::Frame::functionName):
+        (JSC::StackVisitor::Frame::sourceURL):
+        (JSC::StackVisitor::Frame::toString):
+        (JSC::StackVisitor::Frame::hasLineAndColumnInfo):
+        (JSC::StackVisitor::Frame::setToEnd):
+        * interpreter/StackVisitor.h:
+        (JSC::StackVisitor::Frame::callee):
+        (JSC::StackVisitor::Frame::isNativeFrame):
+        (JSC::StackVisitor::Frame::isJSFrame): Deleted.
+        * jsc.cpp:
+        (callWasmFunction):
+        (functionTestWasmModuleFunctions):
+        * runtime/Error.cpp:
+        (JSC::addErrorInfoAndGetBytecodeOffset):
+        * runtime/JSCell.cpp:
+        (JSC::JSCell::isAnyWasmCallee):
+        * runtime/JSCell.h:
+        * runtime/JSFunction.cpp:
+        (JSC::RetrieveArgumentsFunctor::operator()):
+        (JSC::RetrieveCallerFunctionFunctor::operator()):
+        * runtime/StackFrame.cpp:
+        (JSC::StackFrame::sourceID):
+        (JSC::StackFrame::sourceURL):
+        (JSC::StackFrame::functionName):
+        (JSC::StackFrame::computeLineAndColumn):
+        (JSC::StackFrame::toString):
+        * runtime/StackFrame.h:
+        (JSC::StackFrame::StackFrame):
+        (JSC::StackFrame::hasLineAndColumnInfo):
+        (JSC::StackFrame::hasBytecodeOffset):
+        (JSC::StackFrame::bytecodeOffset):
+        (JSC::StackFrame::isNative): Deleted.
+        * runtime/VM.h:
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+        (JSC::Wasm::createJSToWasmWrapper):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmCallingConvention.h:
+        (JSC::Wasm::CallingConvention::setupFrameInPrologue):
+        * wasm/WasmFormat.h:
+        * wasm/WasmMemory.cpp:
+        (JSC::Wasm::Memory::Memory):
+        * wasm/WasmMemory.h:
+        (JSC::Wasm::Memory::isValid):
+        * wasm/WasmPlan.cpp:
+        (JSC::Wasm::Plan::run):
+        (JSC::Wasm::Plan::initializeCallees):
+        * wasm/WasmPlan.h:
+        (JSC::Wasm::Plan::jsToWasmEntryPointForFunction): Deleted.
+        * wasm/js/JSWebAssemblyCallee.cpp:
+        (JSC::JSWebAssemblyCallee::finishCreation):
+        * wasm/js/JSWebAssemblyCallee.h:
+        (JSC::JSWebAssemblyCallee::create):
+        (JSC::JSWebAssemblyCallee::entrypoint):
+        (JSC::JSWebAssemblyCallee::calleeSaveRegisters):
+        (JSC::JSWebAssemblyCallee::jsToWasmEntryPoint): Deleted.
+        * wasm/js/JSWebAssemblyModule.cpp:
+        (JSC::JSWebAssemblyModule::JSWebAssemblyModule):
+        (JSC::JSWebAssemblyModule::visitChildren):
+        (JSC::JSWebAssemblyModule::UnconditionalFinalizer::finalizeUnconditionally):
+        * wasm/js/JSWebAssemblyModule.h:
+        (JSC::JSWebAssemblyModule::jsEntrypointCalleeFromFunctionIndexSpace):
+        (JSC::JSWebAssemblyModule::wasmEntrypointCalleeFromFunctionIndexSpace):
+        (JSC::JSWebAssemblyModule::setJSEntrypointCallee):
+        (JSC::JSWebAssemblyModule::setWasmEntrypointCallee):
+        (JSC::JSWebAssemblyModule::allocationSize):
+        (JSC::JSWebAssemblyModule::calleeFromFunctionIndexSpace): Deleted.
+        * wasm/js/JSWebAssemblyRuntimeError.h:
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::WebAssemblyFunction::call):
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        (JSC::constructJSWebAssemblyInstance):
+        * wasm/js/WebAssemblyMemoryConstructor.cpp:
+        (JSC::constructJSWebAssemblyMemory):
+        * wasm/js/WebAssemblyModuleConstructor.cpp:
+        (JSC::constructJSWebAssemblyModule):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::link):
+
+2016-12-11  Filip Pizlo  
+
+        Re-enable concurrent GC.
+
+        Rubber stamped by Saam Barati.
+        
+        This change actually landed in r209692 by accident.
+
+        * runtime/Options.h:
+
+2016-12-10  Filip Pizlo  
+
+        MarkedBlock::marksConveyLivenessDuringMarking should take into account collection scope
+        https://bugs.webkit.org/show_bug.cgi?id=165741
+
+        Reviewed by Saam Barati.
+        
+        MarkedBlock::marksConveyLivenessDuringMarking thought that the off-by-one marking
+        version indicated liveness during any collection, when it really only does so during a full collection.
+        One of its users - MarkedBlock::sweep - knew this and had a special case, but the other
+        one - MarkedBlock::isLive - didn't. So, I moved the special case into
+        marksConveyLivenessDuringMarking.
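+
+        A rough, self-contained sketch of the shape of the fixed predicate (the real
+        MarkedBlock code differs; the names and parameters below are illustrative):
+
+            #include <cstdint>
+
+            enum class CollectionScope { Eden, Full };
+
+            // The off-by-one marking version only conveys liveness while marking a
+            // *full* collection, so isLive and sweep can now share a single check.
+            bool marksConveyLivenessDuringMarking(uint64_t blockVersion, uint64_t heapVersion,
+                bool mutatorIsMarking, CollectionScope scope)
+            {
+                return mutatorIsMarking
+                    && scope == CollectionScope::Full
+                    && blockVersion + 1 == heapVersion;
+            }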
+        
+        Also, this cleans up some remaining bitvector races.
+        
+        To find this bug, I significantly strengthened our assertions.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * heap/CellContainer.cpp: Added.
+        (JSC::CellContainer::isNewlyAllocated):
+        * heap/CellContainer.h:
+        * heap/MarkedAllocator.cpp:
+        (JSC::MarkedAllocator::addBlock):
+        (JSC::MarkedAllocator::removeBlock):
+        (JSC::MarkedAllocator::dumpBits):
+        * heap/MarkedAllocator.h:
+        (JSC::MarkedAllocator::forEachBitVector):
+        (JSC::MarkedAllocator::forEachBitVectorWithName):
+        * heap/MarkedBlock.cpp:
+        (JSC::MarkedBlock::tryCreate):
+        (JSC::MarkedBlock::Handle::~Handle):
+        (JSC::MarkedBlock::MarkedBlock):
+        (JSC::MarkedBlock::Handle::specializedSweep):
+        (JSC::MarkedBlock::Handle::sweepHelperSelectMarksMode):
+        (JSC::MarkedBlock::Handle::stopAllocating):
+        (JSC::MarkedBlock::Handle::resumeAllocating):
+        (JSC::MarkedBlock::aboutToMarkSlow):
+        (JSC::MarkedBlock::Handle::didConsumeFreeList):
+        (JSC::MarkedBlock::Handle::dumpState):
+        * heap/MarkedBlock.h:
+        (JSC::MarkedBlock::markingVersion):
+        (JSC::MarkedBlock::isMarkedRaw):
+        (JSC::MarkedBlock::isMarked):
+        * heap/MarkedBlockInlines.h:
+        (JSC::MarkedBlock::marksConveyLivenessDuringMarking):
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::appendJSCellOrAuxiliary):
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+        * runtime/StructureIDTable.h:
+        (JSC::StructureIDTable::size):
+        (JSC::StructureIDTable::get):
+
+2016-12-10  Filip Pizlo  
+
+        The DOM should have an advancing wavefront opaque root barrier
+        https://bugs.webkit.org/show_bug.cgi?id=165712
+
+        Reviewed by Yusuke Suzuki.
+        
+        This exposes the ability to fire an advancing wavefront barrier on opaque roots. It also
+        gives clients the ability to maintain their own cache of whether that barrier needs to
+        be enabled.
+        
+        The DOM uses this to enable a very cheap barrier on the DOM. This is neutral on
+        Speedometer and fixes another concurrent GC crash.
+
+        * heap/Heap.cpp:
+        (JSC::Heap::beginMarking):
+        (JSC::Heap::endMarking):
+        (JSC::Heap::writeBarrierOpaqueRootSlow):
+        (JSC::Heap::addMutatorShouldBeFencedCache):
+        (JSC::Heap::setMutatorShouldBeFenced):
+        * heap/Heap.h:
+        * heap/HeapInlines.h:
+        (JSC::writeBarrierOpaqueRoot):
+
+2016-12-10  Commit Queue  
+
+        Unreviewed, rolling out r209653, r209654, r209663, and
+        r209673.
+        https://bugs.webkit.org/show_bug.cgi?id=165739
+
+        speedometer crashes (Requested by pizlo on #webkit).
+
+        Reverted changesets:
+
+        "JSVALUE64: Pass arguments in platform argument registers when
+        making JavaScript calls"
+        https://bugs.webkit.org/show_bug.cgi?id=160355
+        http://trac.webkit.org/changeset/209653
+
+        "Unreviewed build fix for 32 bit builds."
+        http://trac.webkit.org/changeset/209654
+
+        "Unreviewed build fix for the CLOOP after r209653"
+        http://trac.webkit.org/changeset/209663
+
+        "REGRESSION(r209653) Crash in CallFrameShuffler::snapshot()"
+        https://bugs.webkit.org/show_bug.cgi?id=165728
+        http://trac.webkit.org/changeset/209673
+
+2016-12-10  Michael Saboff  
+
+        REGRESSION(r209653) Crash in CallFrameShuffler::snapshot()
+        https://bugs.webkit.org/show_bug.cgi?id=165728
+
+        Reviewed by Filip Pizlo.
+
+        It can be the case that a JSValueRegs' CachedRecovery is the source for multiple
+        GPRs. We only store the CachedRecovery in one slot of m_newRegisters to simplify
+        the recovery process. This is also done for the case where the recovery source
+        and destination are the same GPR.
+
+        In light of this change, snapshot needs to be taught that one CachedRecovery can
+        be the source for multiple registers.  This is done using a two-step process.
+        First, find all the argument CachedRecoverys and create a vector mapping each
+        target GPR to its source recovery.  Then use that vector to get the recovery
+        for each register.
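+
+        A minimal sketch of the two-step idea, with illustrative types standing in for
+        the real CachedRecovery/GPR machinery:
+
+            #include <array>
+            #include <cstddef>
+            #include <utility>
+            #include <vector>
+
+            struct Recovery { int sourceGPR; };   // stand-in for CachedRecovery
+            constexpr size_t numberOfGPRs = 16;
+
+            // Step 1: walk the argument recoveries and record, for every target GPR,
+            // which recovery feeds it (one recovery may feed several GPRs).
+            // Step 2: callers then look up the recovery per register from the map.
+            std::array<const Recovery*, numberOfGPRs> buildRegisterMap(
+                const std::vector<std::pair<Recovery, std::vector<size_t>>>& argumentRecoveries)
+            {
+                std::array<const Recovery*, numberOfGPRs> map {};
+                for (auto& entry : argumentRecoveries) {
+                    for (size_t gpr : entry.second)
+                        map[gpr] = &entry.first;
+                }
+                return map;
+            }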
+
+        * jit/CallFrameShuffler.h:
+        (JSC::CallFrameShuffler::snapshot):
+
+2016-12-10  Keith Miller  
+
+        Fix indirect_call if the result type is used.
+        https://bugs.webkit.org/show_bug.cgi?id=165727
+
+        Reviewed by Michael Saboff.
+
+        The patchpoint for indirect_call assumed that the callee would be
+        in params[0]. This is not the case, however, if the callee returns
+        a value.
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::addCallIndirect):
+
+2016-12-10  Konstantin Tokarev  
+
+        [cmake] Include WTF, JSC, and WebCore headers automatically to targets using them
+        https://bugs.webkit.org/show_bug.cgi?id=165686
+
+        Reviewed by Michael Catanzaro.
+
+        This change reduces duplication of include path lists between modules,
+        and reduces future need for fixes like r209605 (broken build because of
+        WebCore header suddenly becoming used in WebKit2).
+
+        * CMakeLists.txt:
+        * PlatformEfl.cmake:
+        * PlatformGTK.cmake:
+        * PlatformJSCOnly.cmake:
+        * PlatformMac.cmake:
+
+2016-12-10  Michael Saboff  
+
+        Unreviewed build fix for the CLOOP after r209653
+
+        * jit/GPRInfo.h:
+        Provided a definition for NUMBER_OF_JS_FUNCTION_ARGUMENT_REGISTERS when the JIT is disabled.
+        * jit/JITEntryPoints.h:
+        Removed #if ENABLE(JIT) protection around contents.
+
+2016-12-10  Yusuke Suzuki  
+
+        [JSC] Module namespace object behaves like immutable prototype exotic object
+        https://bugs.webkit.org/show_bug.cgi?id=165598
+
+        Reviewed by Mark Lam.
+
+        In the latest ECMA262 draft, the module namespace object behaves like an immutable prototype exotic object.
+        https://tc39.github.io/ecma262/#sec-module-namespace-exotic-objects-setprototypeof-v
+
+        * runtime/JSModuleNamespaceObject.h:
+
+2016-12-10  Yusuke Suzuki  
+
+        REGRESSION(r208791): Assertion in testb3
+        https://bugs.webkit.org/show_bug.cgi?id=165651
+
+        Reviewed by Saam Barati.
+
+        We accidentally always used edx/rdx for the result of UDiv/UMod.
+        That is incorrect: we should use eax/rax for the result of UDiv.
+
+        * b3/B3LowerToAir.cpp:
+        (JSC::B3::Air::LowerToAir::lowerX86UDiv):
+
+2016-12-09  Michael Saboff  
+
+        Unreviewed build fix for 32 bit builds.
+
+        * dfg/DFGMinifiedNode.h:
+        (JSC::DFG::MinifiedNode::argumentIndex): Added a static_cast().
+
+2016-12-09  Michael Saboff  
+
+        JSVALUE64: Pass arguments in platform argument registers when making JavaScript calls
+        https://bugs.webkit.org/show_bug.cgi?id=160355
+
+        Reviewed by Filip Pizlo.
+
+        This patch implements passing JavaScript function arguments in registers for 64 bit platforms.
+
+        The implemented convention follows the ABI conventions for the associated platform.
+        The first two argument registers hold the callee and the argument count; the rest of the
+        argument registers contain "this" and the following arguments until all platform argument
+        registers are exhausted.  Arguments beyond what fits in registers are placed on the stack
+        in the same locations as before this patch.
+
+        For X86-64 non-Windows platforms, there are 6 argument registers specified in the related ABI.
+        ARM64 has 8 argument registers.  This allows for 4 or 6 parameter values to be placed in
+        registers on these respective platforms.  This patch doesn't implement passing arguments in
+        registers for 32 bit platforms, since most platforms have at most 4 argument registers
+        specified and 32 bit platforms use two 32 bit registers/memory locations to store one JSValue.
+
+        The call frame on the stack is unchanged in format, and the arguments that are passed in
+        registers use the corresponding call frame location as a spill location. Arguments can
+        also be passed on the stack. The LLInt, baseline JIT'ed code, as well as the initial entry
+        from C++ code, pass arguments on the stack. DFG- and FTL-generated code pass arguments
+        via registers. All callees can accept arguments either in registers or on the stack.
+        The callee is responsible for moving arguments to its preferred location.
+
+        The multiple entry points to JavaScript code are now handled via the JITEntryPoints class and
+        related code.  That class now has entries for StackArgsArityCheckNotRequired,
+        StackArgsMustCheckArity and, for platforms that support register arguments,
+        RegisterArgsArityCheckNotRequired and RegisterArgsMustCheckArity, as well as an additional
+        RegisterArgsPossibleExtraArgs entry point for when extra register arguments are passed.
+        This last case is needed to spill those extra arguments to the corresponding call frame
+        slots.
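+
+        A small sketch of the register assignment described above (the counts are the
+        x86-64 SysV case; the real GPRInfo helpers differ in detail):
+
+            #include <cstddef>
+            #include <optional>
+
+            constexpr size_t numberOfJSArgumentRegisters = 6; // 8 on ARM64
+
+            // Register 0 holds the callee and register 1 the argument count; "this" is
+            // JS argument 0 and takes register 2, and so on until registers run out.
+            std::optional<size_t> registerIndexForJSArgument(size_t jsArgumentIndex)
+            {
+                size_t index = 2 + jsArgumentIndex;
+                if (index < numberOfJSArgumentRegisters)
+                    return index;
+                return std::nullopt; // passed on the stack, in its usual call frame slot
+            }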
+
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * b3/B3ArgumentRegValue.h:
+        * b3/B3Validate.cpp:
+        * bytecode/CallLinkInfo.cpp:
+        (JSC::CallLinkInfo::CallLinkInfo):
+        * bytecode/CallLinkInfo.h:
+        (JSC::CallLinkInfo::setUpCall):
+        (JSC::CallLinkInfo::argumentsLocation):
+        (JSC::CallLinkInfo::argumentsInRegisters):
+        * bytecode/PolymorphicAccess.cpp:
+        (JSC::AccessCase::generateImpl):
+        * dfg/DFGAbstractInterpreterInlines.h:
+        (JSC::DFG::AbstractInterpreter::executeEffects):
+        * dfg/DFGByteCodeParser.cpp:
+        (JSC::DFG::ByteCodeParser::parseBlock):
+        * dfg/DFGCPSRethreadingPhase.cpp:
+        (JSC::DFG::CPSRethreadingPhase::canonicalizeLocalsInBlock):
+        (JSC::DFG::CPSRethreadingPhase::specialCaseArguments):
+        (JSC::DFG::CPSRethreadingPhase::computeIsFlushed):
+        * dfg/DFGClobberize.h:
+        (JSC::DFG::clobberize):
+        * dfg/DFGCommon.h:
+        * dfg/DFGDCEPhase.cpp:
+        (JSC::DFG::DCEPhase::run):
+        * dfg/DFGDoesGC.cpp:
+        (JSC::DFG::doesGC):
+        * dfg/DFGDriver.cpp:
+        (JSC::DFG::compileImpl):
+        * dfg/DFGFixupPhase.cpp:
+        (JSC::DFG::FixupPhase::fixupNode):
+        * dfg/DFGGenerationInfo.h:
+        (JSC::DFG::GenerationInfo::initArgumentRegisterValue):
+        * dfg/DFGGraph.cpp:
+        (JSC::DFG::Graph::dump):
+        (JSC::DFG::Graph::methodOfGettingAValueProfileFor):
+        * dfg/DFGGraph.h:
+        (JSC::DFG::Graph::needsFlushedThis):
+        (JSC::DFG::Graph::addImmediateShouldSpeculateInt32):
+        * dfg/DFGInPlaceAbstractState.cpp:
+        (JSC::DFG::InPlaceAbstractState::initialize):
+        * dfg/DFGJITCompiler.cpp:
+        (JSC::DFG::JITCompiler::link):
+        (JSC::DFG::JITCompiler::compile):
+        (JSC::DFG::JITCompiler::compileFunction):
+        (JSC::DFG::JITCompiler::compileEntry): Deleted.
+        * dfg/DFGJITCompiler.h:
+        (JSC::DFG::JITCompiler::addJSDirectCall):
+        (JSC::DFG::JITCompiler::JSDirectCallRecord::JSDirectCallRecord):
+        (JSC::DFG::JITCompiler::JSDirectCallRecord::hasSlowCall):
+        * dfg/DFGJITFinalizer.cpp:
+        (JSC::DFG::JITFinalizer::JITFinalizer):
+        (JSC::DFG::JITFinalizer::finalize):
+        (JSC::DFG::JITFinalizer::finalizeFunction):
+        * dfg/DFGJITFinalizer.h:
+        * dfg/DFGLiveCatchVariablePreservationPhase.cpp:
+        (JSC::DFG::LiveCatchVariablePreservationPhase::handleBlock):
+        * dfg/DFGMaximalFlushInsertionPhase.cpp:
+        (JSC::DFG::MaximalFlushInsertionPhase::treatRegularBlock):
+        (JSC::DFG::MaximalFlushInsertionPhase::treatRootBlock):
+        * dfg/DFGMayExit.cpp:
+        * dfg/DFGMinifiedNode.cpp:
+        (JSC::DFG::MinifiedNode::fromNode):
+        * dfg/DFGMinifiedNode.h:
+        (JSC::DFG::belongsInMinifiedGraph):
+        * dfg/DFGNode.cpp:
+        (JSC::DFG::Node::hasVariableAccessData):
+        * dfg/DFGNode.h:
+        (JSC::DFG::Node::accessesStack):
+        (JSC::DFG::Node::setVariableAccessData):
+        (JSC::DFG::Node::hasArgumentRegisterIndex):
+        (JSC::DFG::Node::argumentRegisterIndex):
+        * dfg/DFGNodeType.h:
+        * dfg/DFGOSRAvailabilityAnalysisPhase.cpp:
+        (JSC::DFG::LocalOSRAvailabilityCalculator::executeNode):
+        * dfg/DFGOSREntrypointCreationPhase.cpp:
+        (JSC::DFG::OSREntrypointCreationPhase::run):
+        * dfg/DFGPlan.cpp:
+        (JSC::DFG::Plan::compileInThreadImpl):
+        * dfg/DFGPreciseLocalClobberize.h:
+        (JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
+        * dfg/DFGPredictionInjectionPhase.cpp:
+        (JSC::DFG::PredictionInjectionPhase::run):
+        * dfg/DFGPredictionPropagationPhase.cpp:
+        * dfg/DFGPutStackSinkingPhase.cpp:
+        * dfg/DFGRegisterBank.h:
+        (JSC::DFG::RegisterBank::iterator::unlock):
+        (JSC::DFG::RegisterBank::unlockAtIndex):
+        * dfg/DFGSSAConversionPhase.cpp:
+        (JSC::DFG::SSAConversionPhase::run):
+        * dfg/DFGSafeToExecute.h:
+        (JSC::DFG::safeToExecute):
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::SpeculativeJIT):
+        (JSC::DFG::SpeculativeJIT::clearGenerationInfo):
+        (JSC::DFG::dumpRegisterInfo):
+        (JSC::DFG::SpeculativeJIT::dump):
+        (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
+        (JSC::DFG::SpeculativeJIT::checkArgumentTypes):
+        (JSC::DFG::SpeculativeJIT::setupArgumentRegistersForEntry):
+        (JSC::DFG::SpeculativeJIT::compile):
+        * dfg/DFGSpeculativeJIT.h:
+        (JSC::DFG::SpeculativeJIT::allocate):
+        (JSC::DFG::SpeculativeJIT::spill):
+        (JSC::DFG::SpeculativeJIT::generationInfoFromVirtualRegister):
+        (JSC::DFG::JSValueOperand::JSValueOperand):
+        (JSC::DFG::JSValueOperand::gprUseSpecific):
+        * dfg/DFGSpeculativeJIT32_64.cpp:
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        (JSC::DFG::SpeculativeJIT::compile):
+        * dfg/DFGSpeculativeJIT64.cpp:
+        (JSC::DFG::SpeculativeJIT::fillJSValue):
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        (JSC::DFG::SpeculativeJIT::compile):
+        * dfg/DFGStrengthReductionPhase.cpp:
+        (JSC::DFG::StrengthReductionPhase::handleNode):
+        * dfg/DFGThunks.cpp:
+        (JSC::DFG::osrEntryThunkGenerator):
+        * dfg/DFGVariableEventStream.cpp:
+        (JSC::DFG::VariableEventStream::reconstruct):
+        * dfg/DFGVirtualRegisterAllocationPhase.cpp:
+        (JSC::DFG::VirtualRegisterAllocationPhase::allocateRegister):
+        (JSC::DFG::VirtualRegisterAllocationPhase::run):
+        * ftl/FTLCapabilities.cpp:
+        (JSC::FTL::canCompile):
+        * ftl/FTLJITCode.cpp:
+        (JSC::FTL::JITCode::~JITCode):
+        (JSC::FTL::JITCode::initializeEntrypointThunk):
+        (JSC::FTL::JITCode::setEntryFor):
+        (JSC::FTL::JITCode::addressForCall):
+        (JSC::FTL::JITCode::executableAddressAtOffset):
+        (JSC::FTL::JITCode::initializeAddressForCall): Deleted.
+        (JSC::FTL::JITCode::initializeArityCheckEntrypoint): Deleted.
+        * ftl/FTLJITCode.h:
+        * ftl/FTLJITFinalizer.cpp:
+        (JSC::FTL::JITFinalizer::finalizeFunction):
+        * ftl/FTLLink.cpp:
+        (JSC::FTL::link):
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::lower):
+        (JSC::FTL::DFG::LowerDFGToB3::compileNode):
+        (JSC::FTL::DFG::LowerDFGToB3::compileGetArgumentRegister):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstruct):
+        (JSC::FTL::DFG::LowerDFGToB3::compileDirectCallOrConstruct):
+        (JSC::FTL::DFG::LowerDFGToB3::compileTailCall):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargsSpread):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
+        (JSC::FTL::DFG::LowerDFGToB3::compileCallEval):
+        * ftl/FTLOSREntry.cpp:
+        (JSC::FTL::prepareOSREntry):
+        * ftl/FTLOutput.cpp:
+        (JSC::FTL::Output::argumentRegister):
+        (JSC::FTL::Output::argumentRegisterInt32):
+        * ftl/FTLOutput.h:
+        * interpreter/ShadowChicken.cpp:
+        (JSC::ShadowChicken::update):
+        * jit/AssemblyHelpers.cpp:
+        (JSC::AssemblyHelpers::emitDumbVirtualCall):
+        * jit/AssemblyHelpers.h:
+        (JSC::AssemblyHelpers::spillArgumentRegistersToFrameBeforePrologue):
+        (JSC::AssemblyHelpers::spillArgumentRegistersToFrame):
+        (JSC::AssemblyHelpers::fillArgumentRegistersFromFrameBeforePrologue):
+        (JSC::AssemblyHelpers::emitPutArgumentToCallFrameBeforePrologue):
+        (JSC::AssemblyHelpers::emitPutArgumentToCallFrame):
+        (JSC::AssemblyHelpers::emitGetFromCallFrameHeaderBeforePrologue):
+        (JSC::AssemblyHelpers::emitGetFromCallFrameArgumentBeforePrologue):
+        (JSC::AssemblyHelpers::emitGetPayloadFromCallFrameHeaderBeforePrologue):
+        (JSC::AssemblyHelpers::incrementCounter):
+        * jit/CachedRecovery.cpp:
+        (JSC::CachedRecovery::addTargetJSValueRegs):
+        * jit/CachedRecovery.h:
+        (JSC::CachedRecovery::gprTargets):
+        (JSC::CachedRecovery::setWantedFPR):
+        (JSC::CachedRecovery::wantedJSValueRegs):
+        (JSC::CachedRecovery::setWantedJSValueRegs): Deleted.
+        * jit/CallFrameShuffleData.h:
+        * jit/CallFrameShuffler.cpp:
+        (JSC::CallFrameShuffler::CallFrameShuffler):
+        (JSC::CallFrameShuffler::dump):
+        (JSC::CallFrameShuffler::tryWrites):
+        (JSC::CallFrameShuffler::prepareAny):
+        * jit/CallFrameShuffler.h:
+        (JSC::CallFrameShuffler::snapshot):
+        (JSC::CallFrameShuffler::addNew):
+        (JSC::CallFrameShuffler::initDangerFrontier):
+        (JSC::CallFrameShuffler::updateDangerFrontier):
+        (JSC::CallFrameShuffler::findDangerFrontierFrom):
+        * jit/CallFrameShuffler64.cpp:
+        (JSC::CallFrameShuffler::emitDisplace):
+        * jit/GPRInfo.h:
+        (JSC::JSValueRegs::operator==):
+        (JSC::JSValueRegs::operator!=):
+        (JSC::GPRInfo::toArgumentIndex):
+        (JSC::argumentRegisterFor):
+        (JSC::argumentRegisterForCallee):
+        (JSC::argumentRegisterForArgumentCount):
+        (JSC::argumentRegisterIndexForJSFunctionArgument):
+        (JSC::jsFunctionArgumentForArgumentRegister):
+        (JSC::argumentRegisterForFunctionArgument):
+        (JSC::numberOfRegisterArgumentsFor):
+        * jit/JIT.cpp:
+        (JSC::JIT::compileWithoutLinking):
+        (JSC::JIT::link):
+        (JSC::JIT::compileCTINativeCall): Deleted.
+        * jit/JIT.h:
+        (JSC::JIT::compileNativeCallEntryPoints):
+        * jit/JITCall.cpp:
+        (JSC::JIT::compileSetupVarargsFrame):
+        (JSC::JIT::compileCallEval):
+        (JSC::JIT::compileCallEvalSlowCase):
+        (JSC::JIT::compileOpCall):
+        (JSC::JIT::compileOpCallSlowCase):
+        * jit/JITCall32_64.cpp:
+        (JSC::JIT::compileCallEvalSlowCase):
+        (JSC::JIT::compileOpCall):
+        (JSC::JIT::compileOpCallSlowCase):
+        * jit/JITCode.cpp:
+        (JSC::JITCode::execute):
+        (JSC::DirectJITCode::DirectJITCode):
+        (JSC::DirectJITCode::initializeEntryPoints):
+        (JSC::DirectJITCode::addressForCall):
+        (JSC::NativeJITCode::addressForCall):
+        (JSC::DirectJITCode::initializeCodeRef): Deleted.
+        * jit/JITCode.h:
+        (JSC::JITCode::executableAddress): Deleted.
+        * jit/JITEntryPoints.h: Added.
+        (JSC::JITEntryPoints::JITEntryPoints):
+        (JSC::JITEntryPoints::entryFor):
+        (JSC::JITEntryPoints::setEntryFor):
+        (JSC::JITEntryPoints::offsetOfEntryFor):
+        (JSC::JITEntryPoints::registerEntryTypeForArgumentCount):
+        (JSC::JITEntryPoints::registerEntryTypeForArgumentType):
+        (JSC::JITEntryPoints::clearEntries):
+        (JSC::JITEntryPoints::operator=):
+        (JSC::JITEntryPointsWithRef::JITEntryPointsWithRef):
+        (JSC::JITEntryPointsWithRef::codeRef):
+        (JSC::argumentsLocationFor):
+        (JSC::registerEntryPointTypeFor):
+        (JSC::entryPointTypeFor):
+        (JSC::thunkEntryPointTypeFor):
+        (JSC::JITJSCallThunkEntryPointsWithRef::JITJSCallThunkEntryPointsWithRef):
+        (JSC::JITJSCallThunkEntryPointsWithRef::entryFor):
+        (JSC::JITJSCallThunkEntryPointsWithRef::setEntryFor):
+        (JSC::JITJSCallThunkEntryPointsWithRef::offsetOfEntryFor):
+        (JSC::JITJSCallThunkEntryPointsWithRef::clearEntries):
+        (JSC::JITJSCallThunkEntryPointsWithRef::codeRef):
+        (JSC::JITJSCallThunkEntryPointsWithRef::operator=):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::privateCompileJITEntryNativeCall):
+        (JSC::JIT::privateCompileCTINativeCall): Deleted.
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::privateCompileJITEntryNativeCall):
+        (JSC::JIT::privateCompileCTINativeCall): Deleted.
+        * jit/JITOperations.cpp:
+        * jit/JITThunks.cpp:
+        (JSC::JITThunks::jitEntryNativeCall):
+        (JSC::JITThunks::jitEntryNativeConstruct):
+        (JSC::JITThunks::jitEntryStub):
+        (JSC::JITThunks::jitCallThunkEntryStub):
+        (JSC::JITThunks::hostFunctionStub):
+        (JSC::JITThunks::ctiNativeCall): Deleted.
+        (JSC::JITThunks::ctiNativeConstruct): Deleted.
+        * jit/JITThunks.h:
+        * jit/JSInterfaceJIT.h:
+        (JSC::JSInterfaceJIT::emitJumpIfNotInt32):
+        (JSC::JSInterfaceJIT::emitLoadInt32):
+        * jit/RegisterSet.cpp:
+        (JSC::RegisterSet::argumentRegisters):
+        * jit/RegisterSet.h:
+        * jit/Repatch.cpp:
+        (JSC::linkSlowFor):
+        (JSC::revertCall):
+        (JSC::unlinkFor):
+        (JSC::linkVirtualFor):
+        (JSC::linkPolymorphicCall):
+        * jit/SpecializedThunkJIT.h:
+        (JSC::SpecializedThunkJIT::SpecializedThunkJIT):
+        (JSC::SpecializedThunkJIT::checkJSStringArgument):
+        (JSC::SpecializedThunkJIT::linkFailureHere):
+        (JSC::SpecializedThunkJIT::finalize):
+        * jit/ThunkGenerator.h:
+        * jit/ThunkGenerators.cpp:
+        (JSC::createRegisterArgumentsSpillEntry):
+        (JSC::slowPathFor):
+        (JSC::linkCallThunkGenerator):
+        (JSC::linkDirectCallThunkGenerator):
+        (JSC::linkPolymorphicCallThunkGenerator):
+        (JSC::virtualThunkFor):
+        (JSC::nativeForGenerator):
+        (JSC::nativeCallGenerator):
+        (JSC::nativeTailCallGenerator):
+        (JSC::nativeTailCallWithoutSavedTagsGenerator):
+        (JSC::nativeConstructGenerator):
+        (JSC::stringCharLoadRegCall):
+        (JSC::charCodeAtThunkGenerator):
+        (JSC::charAtThunkGenerator):
+        (JSC::fromCharCodeThunkGenerator):
+        (JSC::clz32ThunkGenerator):
+        (JSC::sqrtThunkGenerator):
+        (JSC::floorThunkGenerator):
+        (JSC::ceilThunkGenerator):
+        (JSC::truncThunkGenerator):
+        (JSC::roundThunkGenerator):
+        (JSC::expThunkGenerator):
+        (JSC::logThunkGenerator):
+        (JSC::absThunkGenerator):
+        (JSC::imulThunkGenerator):
+        (JSC::randomThunkGenerator):
+        (JSC::boundThisNoArgsFunctionCallGenerator):
+        * jit/ThunkGenerators.h:
+        * jsc.cpp:
+        (jscmain):
+        * llint/LLIntEntrypoint.cpp:
+        (JSC::LLInt::setFunctionEntrypoint):
+        (JSC::LLInt::setEvalEntrypoint):
+        (JSC::LLInt::setProgramEntrypoint):
+        (JSC::LLInt::setModuleProgramEntrypoint):
+        * llint/LLIntSlowPaths.cpp:
+        (JSC::LLInt::entryOSR):
+        (JSC::LLInt::setUpCall):
+        * llint/LLIntThunks.cpp:
+        (JSC::LLInt::generateThunkWithJumpTo):
+        (JSC::LLInt::functionForRegisterCallEntryThunkGenerator):
+        (JSC::LLInt::functionForStackCallEntryThunkGenerator):
+        (JSC::LLInt::functionForRegisterConstructEntryThunkGenerator):
+        (JSC::LLInt::functionForStackConstructEntryThunkGenerator):
+        (JSC::LLInt::functionForRegisterCallArityCheckThunkGenerator):
+        (JSC::LLInt::functionForStackCallArityCheckThunkGenerator):
+        (JSC::LLInt::functionForRegisterConstructArityCheckThunkGenerator):
+        (JSC::LLInt::functionForStackConstructArityCheckThunkGenerator):
+        (JSC::LLInt::functionForCallEntryThunkGenerator): Deleted.
+        (JSC::LLInt::functionForConstructEntryThunkGenerator): Deleted.
+        (JSC::LLInt::functionForCallArityCheckThunkGenerator): Deleted.
+        (JSC::LLInt::functionForConstructArityCheckThunkGenerator): Deleted.
+        * llint/LLIntThunks.h:
+        * runtime/ArityCheckMode.h:
+        * runtime/ExecutableBase.cpp:
+        (JSC::ExecutableBase::clearCode):
+        * runtime/ExecutableBase.h:
+        (JSC::ExecutableBase::entrypointFor):
+        (JSC::ExecutableBase::offsetOfEntryFor):
+        (JSC::ExecutableBase::offsetOfJITCodeWithArityCheckFor): Deleted.
+        * runtime/JSBoundFunction.cpp:
+        (JSC::boundThisNoArgsFunctionCall):
+        * runtime/NativeExecutable.cpp:
+        (JSC::NativeExecutable::finishCreation):
+        * runtime/ScriptExecutable.cpp:
+        (JSC::ScriptExecutable::installCode):
+        * runtime/VM.cpp:
+        (JSC::VM::VM):
+        (JSC::thunkGeneratorForIntrinsic):
+        (JSC::VM::clearCounters):
+        (JSC::VM::dumpCounters):
+        * runtime/VM.h:
+        (JSC::VM::getJITEntryStub):
+        (JSC::VM::getJITCallThunkEntryStub):
+        (JSC::VM::addressOfCounter):
+        (JSC::VM::counterFor):
+        * wasm/WasmBinding.cpp:
+        (JSC::Wasm::importStubGenerator):
+
+2016-12-09  Keith Miller  
+
+        Wasm should support call_indirect
+        https://bugs.webkit.org/show_bug.cgi?id=165718
+
+        Reviewed by Filip Pizlo.
+
+        This patch adds support for call_indirect. The basic framework for
+        an indirect call is that the module holds a buffer containing a
+        stub for each function in the index space. Whenever a function
+        needs to do an indirect call, it gets an index into that table. In
+        order to ensure call_indirect is calling a valid function, the
+        functionIndexSpace also needs a pointer to a canonicalized
+        signature. When making an indirect call, we first check that the index
+        is in range, then check that the signature matches the value we were given.
+
+        This patch also differentiates between FunctionIndexSpaces and
+        ImmutableFunctionIndexSpaces. Since we don't know the size of the
+        FunctionIndexSpace when we start parsing, we need to be able to
+        resize the IndexSpace. However, once we have finished parsing all
+        the sections, we want to prevent any relocation of the function
+        index space pointer.
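+
+        A self-contained sketch of the two guards an indirect call performs (the types
+        and names are illustrative, not the real JSWebAssemblyModule layout):
+
+            #include <cstddef>
+            #include <cstdint>
+            #include <stdexcept>
+
+            struct IndexSpaceEntry {
+                const void* canonicalSignature; // canonicalized signature pointer
+                void* stub;                     // callable stub for this function index
+            };
+
+            void* entrypointForCallIndirect(const IndexSpaceEntry* space, size_t spaceSize,
+                uint32_t index, const void* expectedSignature)
+            {
+                if (index >= spaceSize)                                   // 1. bounds check
+                    throw std::runtime_error("call_indirect: index out of range");
+                if (space[index].canonicalSignature != expectedSignature) // 2. signature check
+                    throw std::runtime_error("call_indirect: signature mismatch");
+                return space[index].stub;
+            }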
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+        (JSC::Wasm::B3IRGenerator::addCall):
+        (JSC::Wasm::B3IRGenerator::addCallIndirect):
+        (JSC::Wasm::createJSToWasmWrapper):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmB3IRGenerator.h:
+        * wasm/WasmCallingConvention.h:
+        (JSC::Wasm::CallingConvention::setupCall):
+        * wasm/WasmFormat.h:
+        * wasm/WasmFunctionParser.h:
+        (JSC::Wasm::FunctionParser::setErrorMessage):
+        (JSC::Wasm::FunctionParser::FunctionParser):
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/WasmPlan.cpp:
+        (JSC::Wasm::Plan::run):
+        * wasm/WasmPlan.h:
+        (JSC::Wasm::Plan::takeFunctionIndexSpace):
+        * wasm/WasmValidate.cpp:
+        (JSC::Wasm::Validate::addCallIndirect):
+        (JSC::Wasm::validateFunction):
+        * wasm/WasmValidate.h:
+        * wasm/js/JSWebAssemblyModule.cpp:
+        (JSC::JSWebAssemblyModule::create):
+        (JSC::JSWebAssemblyModule::JSWebAssemblyModule):
+        * wasm/js/JSWebAssemblyModule.h:
+        (JSC::JSWebAssemblyModule::signatureForFunctionIndexSpace):
+        (JSC::JSWebAssemblyModule::offsetOfFunctionIndexSpace):
+
+2016-12-09  JF Bastien  
+
+        WebAssembly: implement data section
+        https://bugs.webkit.org/show_bug.cgi?id=165696
+
+        Reviewed by Keith Miller.
+
+        As specified in https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#data-section
+        Note that some of the interesting corner cases are ill-defined by the spec: https://github.com/WebAssembly/design/issues/897
+
+        * wasm/WasmFormat.h: segments are what represent sections of memory to initialize (similar to ELF's non-zero initializer data / rodata)
+        (JSC::Wasm::Segment::make):
+        (JSC::Wasm::Segment::destroy):
+        (JSC::Wasm::Segment::byte):
+        (JSC::Wasm::Segment::makePtr):
+        * wasm/WasmModuleParser.cpp: parse the data section, and prevent a few overflows if a user passes in UINT_MAX (the loops would overflow)
+        (JSC::Wasm::ModuleParser::parseType):
+        (JSC::Wasm::ModuleParser::parseImport):
+        (JSC::Wasm::ModuleParser::parseFunction):
+        (JSC::Wasm::ModuleParser::parseExport):
+        (JSC::Wasm::ModuleParser::parseCode):
+        (JSC::Wasm::ModuleParser::parseData):
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::evaluate): the only sensible time to initialize the data section is after linking but before calling start. I test for this, but the spec isn't clear that this is correct yet.
+
+2016-12-09  Karim H  
+
+        It is okay to turn undefined into null because we are producing values for a
+        JSON representation (InspectorValue) and JSON has a `null` value and no
+        `undefined` value.
+        https://bugs.webkit.org/show_bug.cgi?id=165506
+
+        Reviewed by Darin Adler.
+
+        * bindings/ScriptValue.cpp:
+        (Inspector::jsToInspectorValue):
+
+2016-12-09  Filip Pizlo  
+
+        REGRESSION (r209554-209571): stress/poly-setter-combo crashing
+        https://bugs.webkit.org/show_bug.cgi?id=165669
+
+        Reviewed by Geoffrey Garen.
+        
+        We now rely on objects being zero-filled in a bunch of places, not just concurrent GC.
+        So, we need 32-bit to do it too.
+
+        * dfg/DFGSpeculativeJIT32_64.cpp:
+        (JSC::DFG::SpeculativeJIT::compile):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::emit_op_new_object):
+
+2016-12-09  Eric Carlson  
+
+        Annotate MediaStream and WebRTC idl with EnabledAtRuntime flag
+        https://bugs.webkit.org/show_bug.cgi?id=165251
+
+        Reviewed by Dean Jackson.
+
+        Based on a patch by Dr Alex Gouaillard 
+
+        * runtime/CommonIdentifiers.h: Add WebRTC and MediaStream identifiers.
+
+2016-12-09  JF Bastien  
+
+        WebAssembly JS API: implement start function
+        https://bugs.webkit.org/show_bug.cgi?id=165150
+
+        Reviewed by Saam Barati.
+
+        * wasm/WasmFormat.h: pass the start function around
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseTable): mark unreachable code
+        (JSC::Wasm::ModuleParser::parseGlobal): mark unreachable code
+        (JSC::Wasm::ModuleParser::parseStart): mark unreachable code
+        (JSC::Wasm::ModuleParser::parseElement): mark unreachable code
+        (JSC::Wasm::ModuleParser::parseData): mark unreachable code
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::callWebAssemblyFunction): NFC: call the new function below
+        (JSC::WebAssemblyFunction::call): separate this out so that the start function can use it
+        * wasm/js/WebAssemblyFunction.h:
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::visitChildren): visit the start function
+        (JSC::WebAssemblyModuleRecord::link): handle start function
+        (JSC::WebAssemblyModuleRecord::evaluate): call the start function, if present
+        * wasm/js/WebAssemblyModuleRecord.h:
+
+2016-12-09  Filip Pizlo  
+
+        GC might be forced to look at a nuked object due to ordering of AllocatePropertyStorage, MaterializeNewObject, and PutStructure
+        https://bugs.webkit.org/show_bug.cgi?id=165672
+
+        Reviewed by Geoffrey Garen.
+        
+        We need to make sure that the shady stuff in a property put happens after the
+        PutByOffset, since the PutByOffset is the place where we materialize. More generally, we
+        should strive to not have any fenceposts between Nodes where a GC would be illegal.
+        
+        This gets us most of the way there by separating NukeStructureAndSetButterfly from
+        [Re]AllocatePropertyStorage. A transitioning put will now look something like:
+        
+            GetButterfly
+            ReallocatePropertyStorage
+            PutByOffset
+            NukeStructureAndSetButterfly
+            PutStructure
+        
+        Previously the structure would get nuked by ReallocatePropertyStorage, so if we placed
+        an object materialization just after it (before the PutByOffset) then any GC that
+        completed at that safepoint would encounter an unresolved visit race due to seeing a
+        nuked structure. We cannot have nuked structures at safepoints, and this change makes
+        sure that we don't - at least until someone tries to sink to the PutStructure. We will
+        eventually have to create a combined SetStructureAndButterfly node, but we don't need it
+        yet.
+        
+        This also fixes a goof where the DFG's AllocatePropertyStorage was nulling the structure
+        instead of nuking it. This could easily have caused many crashes in GC.
+        
+        * dfg/DFGAbstractInterpreterInlines.h:
+        (JSC::DFG::AbstractInterpreter::executeEffects):
+        * dfg/DFGByteCodeParser.cpp:
+        (JSC::DFG::ByteCodeParser::handlePutById):
+        * dfg/DFGClobberize.h:
+        (JSC::DFG::clobberize):
+        * dfg/DFGClobbersExitState.cpp:
+        (JSC::DFG::clobbersExitState):
+        * dfg/DFGConstantFoldingPhase.cpp:
+        (JSC::DFG::ConstantFoldingPhase::emitPutByOffset):
+        * dfg/DFGDoesGC.cpp:
+        (JSC::DFG::doesGC):
+        * dfg/DFGFixupPhase.cpp:
+        (JSC::DFG::FixupPhase::fixupNode):
+        * dfg/DFGMayExit.cpp:
+        * dfg/DFGNodeType.h:
+        * dfg/DFGOperations.cpp:
+        * dfg/DFGOperations.h:
+        * dfg/DFGPredictionPropagationPhase.cpp:
+        * dfg/DFGSafeToExecute.h:
+        (JSC::DFG::safeToExecute):
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::compileAllocatePropertyStorage):
+        (JSC::DFG::SpeculativeJIT::compileReallocatePropertyStorage):
+        (JSC::DFG::SpeculativeJIT::compileNukeStructureAndSetButterfly):
+        * dfg/DFGSpeculativeJIT.h:
+        * dfg/DFGSpeculativeJIT32_64.cpp:
+        (JSC::DFG::SpeculativeJIT::compile):
+        * dfg/DFGSpeculativeJIT64.cpp:
+        (JSC::DFG::SpeculativeJIT::compile):
+        * dfg/DFGStoreBarrierInsertionPhase.cpp:
+        * dfg/DFGTypeCheckHoistingPhase.cpp:
+        (JSC::DFG::TypeCheckHoistingPhase::identifyRedundantStructureChecks):
+        * ftl/FTLCapabilities.cpp:
+        (JSC::FTL::canCompile):
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::compileNode):
+        (JSC::FTL::DFG::LowerDFGToB3::compileNukeStructureAndSetButterfly):
+        (JSC::FTL::DFG::LowerDFGToB3::storageForTransition):
+        (JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorage):
+        (JSC::FTL::DFG::LowerDFGToB3::reallocatePropertyStorage):
+        (JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorageWithSizeImpl):
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+        * runtime/Options.h: Fix a bug - make it possible to turn on concurrent GC optionally again.
+
+2016-12-09  Chris Dumez  
+
+        Inline JSCell::toObject()
+        https://bugs.webkit.org/show_bug.cgi?id=165679
+
+        Reviewed by Geoffrey Garen.
+
+        Inline JSCell::toObject() as it shows up on Speedometer profiles.
+
+        * runtime/JSCell.cpp:
+        (JSC::JSCell::toObjectSlow):
+        (JSC::JSCell::toObject): Deleted.
+        * runtime/JSCell.h:
+        * runtime/JSCellInlines.h:
+        (JSC::JSCell::toObject):
+
+2016-12-09  Geoffrey Garen  
+
+        Deploy OrdinalNumber in JSC::SourceCode
+        https://bugs.webkit.org/show_bug.cgi?id=165687
+
+        Reviewed by Michael Saboff.
+
+        We have a lot of confusion between 1-based and 0-based counting in line
+        and column numbers. Let's use OrdinalNumber to clear up the confusion.
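+
+        A self-contained sketch of the OrdinalNumber idea (the real class lives in WTF;
+        this just shows why it removes the ambiguity):
+
+            // Store one canonical representation and convert explicitly, so 0-based and
+            // 1-based counts can never be mixed up silently.
+            class OrdinalNumberSketch {
+            public:
+                static OrdinalNumberSketch fromZeroBasedInt(int i) { return OrdinalNumberSketch(i); }
+                static OrdinalNumberSketch fromOneBasedInt(int i) { return OrdinalNumberSketch(i - 1); }
+                int zeroBasedInt() const { return m_zeroBased; }
+                int oneBasedInt() const { return m_zeroBased + 1; }
+            private:
+                explicit OrdinalNumberSketch(int zeroBased) : m_zeroBased(zeroBased) { }
+                int m_zeroBased;
+            };
+
+        For example, a SourceCode starting at line 1 of a file would carry
+        fromOneBasedInt(1) and hand bytecode consumers zeroBasedInt(), with no
+        "+ 1"/"- 1" guesswork at the call sites.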
+
+        * bytecode/UnlinkedFunctionExecutable.cpp:
+        (JSC::UnlinkedFunctionExecutable::UnlinkedFunctionExecutable):
+        (JSC::UnlinkedFunctionExecutable::link):
+        * bytecompiler/BytecodeGenerator.h:
+        (JSC::BytecodeGenerator::emitExpressionInfo):
+        * inspector/JSInjectedScriptHost.cpp:
+        (Inspector::JSInjectedScriptHost::functionDetails):
+        * parser/Lexer.cpp:
+        (JSC::Lexer::setCode):
+        * parser/Parser.cpp:
+        (JSC::Parser::Parser):
+        * parser/Parser.h:
+        (JSC::Parser::parse):
+        * parser/SourceCode.h:
+        (JSC::SourceCode::SourceCode):
+        (JSC::SourceCode::firstLine):
+        (JSC::SourceCode::startColumn):
+        * runtime/CodeCache.cpp:
+        (JSC::CodeCache::getUnlinkedGlobalCodeBlock):
+        * runtime/ScriptExecutable.h:
+        (JSC::ScriptExecutable::firstLine):
+        (JSC::ScriptExecutable::startColumn):
+        * tools/CodeProfile.h:
+        (JSC::CodeProfile::CodeProfile):
+
+2016-12-09  Saam Barati  
+
+        WebAssembly JS API: implement importing and defining Memory
+        https://bugs.webkit.org/show_bug.cgi?id=164134
+
+        Reviewed by Keith Miller.
+
+        This patch implements the WebAssembly.Memory object. It refactors
+        the code to now associate a Memory with the instance instead of
+        the Module.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * jsc.cpp:
+        (functionTestWasmModuleFunctions):
+        * runtime/VM.h:
+        * shell/CMakeLists.txt:
+        * testWasm.cpp: Removed.
+        This has bitrotted. I'm removing it.
+
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator):
+        (JSC::Wasm::sizeOfLoadOp):
+        (JSC::Wasm::createJSToWasmWrapper):
+        (JSC::Wasm::parseAndCompile):
+        * wasm/WasmB3IRGenerator.h:
+        * wasm/WasmFormat.cpp:
+        (JSC::Wasm::ModuleInformation::~ModuleInformation): Deleted.
+        * wasm/WasmFormat.h:
+        * wasm/WasmMemory.cpp:
+        (JSC::Wasm::Memory::Memory):
+        * wasm/WasmMemory.h:
+        (JSC::Wasm::Memory::size):
+        (JSC::Wasm::Memory::initial):
+        (JSC::Wasm::Memory::maximum):
+        (JSC::Wasm::Memory::pinnedRegisters): Deleted.
+        * wasm/WasmMemoryInformation.cpp: Added.
+        (JSC::Wasm::MemoryInformation::MemoryInformation):
+        * wasm/WasmMemoryInformation.h: Added.
+        (JSC::Wasm::MemoryInformation::MemoryInformation):
+        (JSC::Wasm::MemoryInformation::pinnedRegisters):
+        (JSC::Wasm::MemoryInformation::initial):
+        (JSC::Wasm::MemoryInformation::maximum):
+        (JSC::Wasm::MemoryInformation::isImport):
+        (JSC::Wasm::MemoryInformation::operator bool):
+        * wasm/WasmModuleParser.cpp:
+        (JSC::Wasm::ModuleParser::parseImport):
+        (JSC::Wasm::ModuleParser::parseMemoryHelper):
+        (JSC::Wasm::ModuleParser::parseMemory):
+        (JSC::Wasm::ModuleParser::parseExport):
+        * wasm/WasmModuleParser.h:
+        * wasm/WasmPageCount.h: Added. Implement a new way of describing Wasm
+        pages and then asking for how many bytes a quantity of pages is. This
+        class also makes it clear when we're talking about bytes or pages.
+
+        (JSC::Wasm::PageCount::PageCount):
+        (JSC::Wasm::PageCount::bytes):
+        (JSC::Wasm::PageCount::isValid):
+        (JSC::Wasm::PageCount::max):
+        (JSC::Wasm::PageCount::operator bool):
+        (JSC::Wasm::PageCount::operator<):
+        (JSC::Wasm::PageCount::operator>):
+        (JSC::Wasm::PageCount::operator>=):
+        * wasm/WasmPlan.cpp:
+        (JSC::Wasm::Plan::run):
+        * wasm/WasmPlan.h:
+        (JSC::Wasm::Plan::memory): Deleted.
+        * wasm/WasmValidate.cpp:
+        (JSC::Wasm::Validate::hasMemory):
+        (JSC::Wasm::Validate::Validate):
+        (JSC::Wasm::validateFunction):
+        * wasm/WasmValidate.h:
+        * wasm/generateWasmValidateInlinesHeader.py:
+        * wasm/js/JSWebAssemblyInstance.cpp:
+        (JSC::JSWebAssemblyInstance::visitChildren):
+        * wasm/js/JSWebAssemblyInstance.h:
+        (JSC::JSWebAssemblyInstance::memory):
+        (JSC::JSWebAssemblyInstance::setMemory):
+        (JSC::JSWebAssemblyInstance::offsetOfImportFunctions):
+        (JSC::JSWebAssemblyInstance::allocationSize):
+        * wasm/js/JSWebAssemblyMemory.cpp:
+        (JSC::JSWebAssemblyMemory::create):
+        (JSC::JSWebAssemblyMemory::JSWebAssemblyMemory):
+        (JSC::JSWebAssemblyMemory::buffer):
+        (JSC::JSWebAssemblyMemory::visitChildren):
+        * wasm/js/JSWebAssemblyMemory.h:
+        (JSC::JSWebAssemblyMemory::memory):
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::callWebAssemblyFunction):
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        Handle importing and creating of memory according
+        to the spec. This also does the needed validation
+        of making sure the memory defined in the module
+        is compatible with the imported memory.
+
+        (JSC::constructJSWebAssemblyInstance):
+        * wasm/js/WebAssemblyMemoryConstructor.cpp:
+        (JSC::constructJSWebAssemblyMemory):
+        (JSC::callJSWebAssemblyMemory):
+        * wasm/js/WebAssemblyMemoryPrototype.cpp:
+        (JSC::webAssemblyMemoryProtoFuncBuffer):
+        (JSC::WebAssemblyMemoryPrototype::create):
+        (JSC::WebAssemblyMemoryPrototype::finishCreation):
+        * wasm/js/WebAssemblyMemoryPrototype.h:
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::finishCreation):
+        (JSC::WebAssemblyModuleRecord::link):
+
+2016-12-09  Joseph Pecoraro  
+
+        Web Inspector: Some resources fetched via Fetch API do not have data
+        https://bugs.webkit.org/show_bug.cgi?id=165230
+        
+
+        Reviewed by Alex Christensen.
+
+        * inspector/protocol/Page.json:
+        Add new Fetch Page.ResourceType.
+
+2016-12-09  Geoffrey Garen  
+
+        TextPosition and OrdinalNumber should be more like idiomatic numbers
+        https://bugs.webkit.org/show_bug.cgi?id=165678
+
+        Reviewed by Filip Pizlo.
+
+        Adopt default constructor.
+
+        * API/JSBase.cpp:
+        (JSEvaluateScript):
+        (JSCheckScriptSyntax):
+        * API/JSObjectRef.cpp:
+        (JSObjectMakeFunction):
+        * API/JSScriptRef.cpp:
+        (OpaqueJSScript::OpaqueJSScript):
+        * jsc.cpp:
+        (functionCheckModuleSyntax):
+        * parser/SourceCode.h:
+        (JSC::makeSource):
+        * parser/SourceProvider.h:
+        (JSC::StringSourceProvider::create):
+        (JSC::WebAssemblySourceProvider::WebAssemblySourceProvider):
+        * runtime/FunctionConstructor.cpp:
+        (JSC::constructFunction):
+        * runtime/ModuleLoaderPrototype.cpp:
+        (JSC::moduleLoaderPrototypeParseModule):
+
+2016-12-09  Filip Pizlo  
+
+        Unreviewed, disable concurrent GC for real.
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+
+2016-12-09  Filip Pizlo  
+
+        Unreviewed, disable concurrent GC while crashes get investigated.
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+
+2016-12-09  Filip Pizlo  
+
+        JSSegmentedVariableObject should keep its state private
+
+        Rubber stamped by Michael Saboff.
+        
+        Its state fields were protected for no reason. They really should be private because
+        you have to know to obey a particular concurrency protocol when accessing them.
+
+        * runtime/JSSegmentedVariableObject.h:
+
+2016-12-09  Csaba Osztrogonác  
+
+        Unreviewed ARM buildfix after r209570.
+
+        * assembler/MacroAssemblerARM.h:
+        (JSC::MacroAssemblerARM::or32): Added.
+
+2016-12-08  JF Bastien  
+
+        WebAssembly: JSC::link* shouldn't need a CodeBlock
+        https://bugs.webkit.org/show_bug.cgi?id=165591
+
+        Reviewed by Keith Miller.
+
+        Allow linking without a CodeBlock, which WebAssembly's wasm -> JS stubs do. This needs to work for polymorphic and virtual calls. This patch adds corresponding tests for this.
+
+        * assembler/LinkBuffer.cpp:
+        (JSC::shouldDumpDisassemblyFor): don't look at the tier option if there isn't a CodeBlock, only look at the global one. This is a WebAssembly function, so the tier information is irrelevant.
+        * jit/Repatch.cpp:
+        (JSC::isWebAssemblyToJSCallee): this is used in the link* functions below
+        (JSC::linkFor):
+        (JSC::linkVirtualFor):
+        (JSC::linkPolymorphicCall):
+        * runtime/Options.h: add an option to change the maximum number of polymorphic calls in stubs from wasm to JS, which will come in handy when we try to tune performance or try merging some of the WebAssembly stubs
+        * wasm/WasmBinding.cpp:
+        (JSC::Wasm::importStubGenerator): remove the breakpoint since the code now works
+        * wasm/js/WebAssemblyToJSCallee.h:
+
+2016-12-08  Filip Pizlo  
+
+        MultiPutByOffset should get a barrier if it transitions
+        https://bugs.webkit.org/show_bug.cgi?id=165646
+
+        Reviewed by Keith Miller.
+        
+        Previously, if we knew that we were storing a non-cell but we needed to transition, we
+        would fail to add the barrier but the FTL's lowering expected the barrier to be there.
+        
+        Strictly, we need to "consider" the barrier on MultiPutByOffset if the value is
+        possibly a cell or if the MultiPutByOffset may transition. Then "considering" the
+        barrier implies checking if the base is possibly old.
+        
+        But because the barrier is so cheap anyway, this patch implements something safer: we
+        just consider the barrier on MultiPutByOffset unconditionally, which opts it out of any
+        barrier optimizations other than those based on the predicted state of the base. Those
+        optimizations are already sound - for example they use doesGC() to detect safepoints
+        and that function correctly predicts when MultiPutByOffset could GC.
+        
+        Because the barrier optimizations are only a very small speed-up, I think it's great to
+        fix bugs by weakening the optimizer without cleverness.
+
+        * dfg/DFGFixupPhase.cpp:
+        * dfg/DFGStoreBarrierInsertionPhase.cpp:
+        * heap/MarkedBlock.cpp:
+        (JSC::MarkedBlock::assertValidCell):
+
+2016-12-08  Filip Pizlo  
+
+        Enable concurrent GC on ARM64
+        https://bugs.webkit.org/show_bug.cgi?id=165643
+
+        Reviewed by Saam Barati.
+
+        It looks stable enough to enable.
+
+        * assembler/CPU.h:
+        (JSC::useGCFences): Deleted.
+        * bytecode/PolymorphicAccess.cpp:
+        (JSC::AccessCase::generateImpl):
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::compileAllocatePropertyStorage):
+        (JSC::DFG::SpeculativeJIT::compileReallocatePropertyStorage):
+        * ftl/FTLLowerDFGToB3.cpp:
+        (JSC::FTL::DFG::LowerDFGToB3::compileMaterializeNewObject):
+        (JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorage):
+        (JSC::FTL::DFG::LowerDFGToB3::reallocatePropertyStorage):
+        (JSC::FTL::DFG::LowerDFGToB3::allocateObject):
+        * jit/AssemblyHelpers.h:
+        (JSC::AssemblyHelpers::mutatorFence):
+        (JSC::AssemblyHelpers::storeButterfly):
+        (JSC::AssemblyHelpers::nukeStructureAndStoreButterfly):
+        (JSC::AssemblyHelpers::emitInitializeInlineStorage):
+        (JSC::AssemblyHelpers::emitInitializeOutOfLineStorage):
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+
+2016-12-08  Filip Pizlo  
+
+        Disable collectContinuously if not useConcurrentGC
+
+        Rubber stamped by Geoffrey Garen.
+
+        * runtime/Options.cpp:
+        (JSC::recomputeDependentOptions):
+
+2016-12-08  Filip Pizlo  
+
+        Unreviewed, fix cloop build.
+
+        * runtime/JSObject.h:
+
+2016-12-06  Filip Pizlo  
+
+        Concurrent GC should be stable enough to land enabled on X86_64
+        https://bugs.webkit.org/show_bug.cgi?id=164990
+
+        Reviewed by Geoffrey Garen.
+        
+        This fixes a ton of performance and correctness bugs revealed by getting the concurrent GC to
+        be stable enough to land enabled.
+        
+        I had to redo the JSObject::visitChildren concurrency protocol again. This time I think it's
+        even more correct than ever!
+        
+        This is an enormous win on JetStream/splay-latency and Octane/SplayLatency. It looks to be
+        mostly neutral on everything else, though Speedometer is showing statistically weak signs of a
+        slight regression.
+
+        * API/JSAPIWrapperObject.mm: Added locking.
+        (JSC::JSAPIWrapperObject::visitChildren):
+        * API/JSCallbackObject.h: Added locking.
+        (JSC::JSCallbackObjectData::visitChildren):
+        (JSC::JSCallbackObjectData::JSPrivatePropertyMap::setPrivateProperty):
+        (JSC::JSCallbackObjectData::JSPrivatePropertyMap::deletePrivateProperty):
+        (JSC::JSCallbackObjectData::JSPrivatePropertyMap::visitChildren):
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * bytecode/CodeBlock.cpp:
+        (JSC::CodeBlock::UnconditionalFinalizer::finalizeUnconditionally): This had a TOCTOU race on shouldJettisonDueToOldAge.
+        (JSC::EvalCodeCache::visitAggregate): Moved to EvalCodeCache.cpp.
+        * bytecode/DirectEvalCodeCache.cpp: Added. Outlined some functions and made them use locks.
+        (JSC::DirectEvalCodeCache::setSlow):
+        (JSC::DirectEvalCodeCache::clear):
+        (JSC::DirectEvalCodeCache::visitAggregate):
+        * bytecode/DirectEvalCodeCache.h:
+        (JSC::DirectEvalCodeCache::set):
+        (JSC::DirectEvalCodeCache::clear): Deleted.
+        * bytecode/UnlinkedCodeBlock.cpp: Added locking.
+        (JSC::UnlinkedCodeBlock::visitChildren):
+        (JSC::UnlinkedCodeBlock::setInstructions):
+        (JSC::UnlinkedCodeBlock::shrinkToFit):
+        * bytecode/UnlinkedCodeBlock.h: Added locking.
+        (JSC::UnlinkedCodeBlock::addRegExp):
+        (JSC::UnlinkedCodeBlock::addConstant):
+        (JSC::UnlinkedCodeBlock::addFunctionDecl):
+        (JSC::UnlinkedCodeBlock::addFunctionExpr):
+        (JSC::UnlinkedCodeBlock::createRareDataIfNecessary):
+        (JSC::UnlinkedCodeBlock::shrinkToFit): Deleted.
+        * debugger/Debugger.cpp: Use the right delete API.
+        (JSC::Debugger::recompileAllJSFunctions):
+        * dfg/DFGAbstractInterpreterInlines.h:
+        (JSC::DFG::AbstractInterpreter::executeEffects): Fix a pre-existing bug in ToFunction constant folding.
+        * dfg/DFGClobberize.h: Add support for nuking.
+        (JSC::DFG::clobberize):
+        * dfg/DFGClobbersExitState.cpp: Add support for nuking.
+        (JSC::DFG::clobbersExitState):
+        * dfg/DFGFixupPhase.cpp: Add support for nuking.
+        (JSC::DFG::FixupPhase::fixupNode):
+        (JSC::DFG::FixupPhase::indexForChecks):
+        (JSC::DFG::FixupPhase::originForCheck):
+        (JSC::DFG::FixupPhase::speculateForBarrier):
+        (JSC::DFG::FixupPhase::insertCheck):
+        (JSC::DFG::FixupPhase::fixupChecksInBlock):
+        * dfg/DFGSpeculativeJIT.cpp: Add support for nuking.
+        (JSC::DFG::SpeculativeJIT::compileAllocatePropertyStorage):
+        (JSC::DFG::SpeculativeJIT::compileReallocatePropertyStorage):
+        * ftl/FTLLowerDFGToB3.cpp: Add support for nuking.
+        (JSC::FTL::DFG::LowerDFGToB3::allocatePropertyStorage):
+        (JSC::FTL::DFG::LowerDFGToB3::reallocatePropertyStorage):
+        (JSC::FTL::DFG::LowerDFGToB3::mutatorFence):
+        (JSC::FTL::DFG::LowerDFGToB3::nukeStructureAndSetButterfly):
+        (JSC::FTL::DFG::LowerDFGToB3::setButterfly): Deleted.
+        * heap/CodeBlockSet.cpp: We need to be more careful about the CodeBlockSet workflow during GC, since we will allocate CodeBlocks in eden while collecting.
+        (JSC::CodeBlockSet::clearMarksForFullCollection):
+        (JSC::CodeBlockSet::deleteUnmarkedAndUnreferenced):
+        * heap/Heap.cpp: Added code to measure max pauses. Added a better collectContinuously mode.
+        (JSC::Heap::lastChanceToFinalize): Stop the collectContinuously thread.
+        (JSC::Heap::harvestWeakReferences): Inline SlotVisitor::harvestWeakReferences.
+        (JSC::Heap::finalizeUnconditionalFinalizers): Inline SlotVisitor::finalizeUnconditionalFinalizers.
+        (JSC::Heap::markToFixpoint): We need to do some MarkedSpace stuff before every conservative scan, rather than just at the start of marking, so we now call prepareForConservativeScan() before each conservative scan. Also call a less-parallel version of drainInParallel when the mutator is running.
+        (JSC::Heap::collectInThread): Inline Heap::prepareForAllocation().
+        (JSC::Heap::stopIfNecessarySlow): We need to be more careful about ensuring that we run finalization before and after stopping. Also, we should sanitize stack when stopping the world.
+        (JSC::Heap::acquireAccessSlow): Add some optional debug prints.
+        (JSC::Heap::handleNeedFinalize): Assert that we are running this when the world is not stopped.
+        (JSC::Heap::finalize): Remove the old collectContinuously code.
+        (JSC::Heap::requestCollection): We don't need to sanitize stack here anymore.
+        (JSC::Heap::notifyIsSafeToCollect): Start the collectContinuously thread. It will request collections at 1 kHz.
+        (JSC::Heap::prepareForAllocation): Deleted.
+        (JSC::Heap::preventCollection): Prevent any new concurrent GCs from being initiated.
+        (JSC::Heap::allowCollection):
+        (JSC::Heap::forEachSlotVisitor): Allows us to safely iterate slot visitors.
+        * heap/Heap.h:
+        * heap/HeapInlines.h:
+        (JSC::Heap::writeBarrier): If the 'to' cell is not NewWhite then it could be AnthraciteOrBlack. During a full collection, objects may be AnthraciteOrBlack from a previous GC. Turns out, we don't benefit from this optimization so we can just kill it.
+        * heap/HeapSnapshotBuilder.cpp:
+        (JSC::HeapSnapshotBuilder::buildSnapshot): This needs to use PreventCollectionScope to ensure snapshot soundness.
+        * heap/ListableHandler.h:
+        (JSC::ListableHandler::isOnList): Useful helper.
+        * heap/LockDuringMarking.h:
+        (JSC::lockDuringMarking): It's a locker that only locks while we're marking.
+        * heap/MarkedAllocator.cpp:
+        (JSC::MarkedAllocator::addBlock): Hold the bitvector lock while resizing.
+        * heap/MarkedBlock.cpp: Hold the bitvector lock while accessing the bitvectors while the mutator is running.
+        * heap/MarkedSpace.cpp:
+        (JSC::MarkedSpace::prepareForConservativeScan): We used to do this in prepareForMarking, but we need to do it before each conservative scan not just before marking.
+        (JSC::MarkedSpace::prepareForMarking): Remove the logic moved to prepareForConservativeScan.
+        * heap/MarkedSpace.h:
+        * heap/PreventCollectionScope.h: Added.
+        * heap/SlotVisitor.cpp: Refactored drainFromShared so that we can write a similar function called drainInParallelPassively.
+        (JSC::SlotVisitor::updateMutatorIsStopped): Update whether we can use "fast" scanning.
+        (JSC::SlotVisitor::mutatorIsStoppedIsUpToDate):
+        (JSC::SlotVisitor::didReachTermination):
+        (JSC::SlotVisitor::hasWork):
+        (JSC::SlotVisitor::drain): This now uses the rightToRun lock to allow the main GC thread to safepoint the workers.
+        (JSC::SlotVisitor::drainFromShared):
+        (JSC::SlotVisitor::drainInParallelPassively): This runs marking with one fewer threads than normal. It's useful for when we have resumed the mutator, since then the mutator has a better chance of getting on a core.
+        (JSC::SlotVisitor::addWeakReferenceHarvester):
+        (JSC::SlotVisitor::addUnconditionalFinalizer):
+        (JSC::SlotVisitor::harvestWeakReferences): Deleted.
+        (JSC::SlotVisitor::finalizeUnconditionalFinalizers): Deleted.
+        * heap/SlotVisitor.h:
+        * heap/SlotVisitorInlines.h: Outline stuff.
+        (JSC::SlotVisitor::addWeakReferenceHarvester): Deleted.
+        (JSC::SlotVisitor::addUnconditionalFinalizer): Deleted.
+        * runtime/InferredType.cpp: This needed thread safety.
+        (JSC::InferredType::visitChildren): This needs to keep its structure finalizer alive until it runs.
+        (JSC::InferredType::set):
+        (JSC::InferredType::InferredStructureFinalizer::finalizeUnconditionally):
+        * runtime/InferredType.h:
+        * runtime/InferredValue.cpp: This needed thread safety.
+        (JSC::InferredValue::visitChildren):
+        (JSC::InferredValue::ValueCleanup::finalizeUnconditionally):
+        * runtime/JSArray.cpp:
+        (JSC::JSArray::unshiftCountSlowCase): Update to use new butterfly API.
+        (JSC::JSArray::unshiftCountWithArrayStorage): Update to use new butterfly API.
+        * runtime/JSArrayBufferView.cpp:
+        (JSC::JSArrayBufferView::visitChildren): Thread safety.
+        * runtime/JSCell.h:
+        (JSC::JSCell::setStructureIDDirectly): This is used for nuking the structure.
+        (JSC::JSCell::InternalLocker::InternalLocker): Deleted. The cell is now the lock.
+        (JSC::JSCell::InternalLocker::~InternalLocker): Deleted. The cell is now the lock.
+        * runtime/JSCellInlines.h:
+        (JSC::JSCell::structure): Clean this up.
+        (JSC::JSCell::lock): The cell is now the lock.
+        (JSC::JSCell::tryLock):
+        (JSC::JSCell::unlock):
+        (JSC::JSCell::isLocked):
+        (JSC::JSCell::lockInternalLock): Deleted.
+        (JSC::JSCell::unlockInternalLock): Deleted.
+        * runtime/JSFunction.cpp:
+        (JSC::JSFunction::visitChildren): Thread safety.
+        * runtime/JSGenericTypedArrayViewInlines.h:
+        (JSC::JSGenericTypedArrayView::visitChildren): Thread safety.
+        (JSC::JSGenericTypedArrayView::slowDownAndWasteMemory): Thread safety.
+        * runtime/JSObject.cpp:
+        (JSC::JSObject::markAuxiliaryAndVisitOutOfLineProperties): Factor out this "easy" step of butterfly visiting.
+        (JSC::JSObject::visitButterfly): Make this achieve 100% precision about structure-butterfly relationships. This relies on the mutator "nuking" the structure prior to "locked" structure-butterfly transitions.
+        (JSC::JSObject::visitChildren): Use the new, nicer API.
+        (JSC::JSFinalObject::visitChildren): Use the new, nicer API.
+        (JSC::JSObject::enterDictionaryIndexingModeWhenArrayStorageAlreadyExists): Use the new butterfly API.
+        (JSC::JSObject::createInitialUndecided): Use the new butterfly API.
+        (JSC::JSObject::createInitialInt32): Use the new butterfly API.
+        (JSC::JSObject::createInitialDouble): Use the new butterfly API.
+        (JSC::JSObject::createInitialContiguous): Use the new butterfly API.
+        (JSC::JSObject::createArrayStorage): Use the new butterfly API.
+        (JSC::JSObject::convertUndecidedToContiguous): Use the new butterfly API.
+        (JSC::JSObject::convertUndecidedToArrayStorage): Use the new butterfly API.
+        (JSC::JSObject::convertInt32ToArrayStorage): Use the new butterfly API.
+        (JSC::JSObject::convertDoubleToContiguous): Use the new butterfly API.
+        (JSC::JSObject::convertDoubleToArrayStorage): Use the new butterfly API.
+        (JSC::JSObject::convertContiguousToArrayStorage): Use the new butterfly API.
+        (JSC::JSObject::increaseVectorLength): Use the new butterfly API.
+        (JSC::JSObject::shiftButterflyAfterFlattening): Use the new butterfly API.
+        * runtime/JSObject.h:
+        (JSC::JSObject::setButterfly): This now does all of the fences. Only use this when you are not also transitioning the structure or the structure's lastOffset.
+        (JSC::JSObject::nukeStructureAndSetButterfly): Use this when doing locked structure-butterfly transitions.
+        * runtime/JSObjectInlines.h:
+        (JSC::JSObject::putDirectWithoutTransition): Use the newly factored out API.
+        (JSC::JSObject::prepareToPutDirectWithoutTransition): Factor this out!
+        (JSC::JSObject::putDirectInternal): Use the newly factored out API.
+        * runtime/JSPropertyNameEnumerator.cpp:
+        (JSC::JSPropertyNameEnumerator::finishCreation): Locks!
+        (JSC::JSPropertyNameEnumerator::visitChildren): Locks!
+        * runtime/JSSegmentedVariableObject.cpp:
+        (JSC::JSSegmentedVariableObject::visitChildren): Locks!
+        * runtime/JSString.cpp:
+        (JSC::JSString::visitChildren): Thread safety.
+        * runtime/ModuleProgramExecutable.cpp:
+        (JSC::ModuleProgramExecutable::visitChildren): Thread safety.
+        * runtime/Options.cpp: For now we disable concurrent GC on not-X86_64.
+        (JSC::recomputeDependentOptions):
+        * runtime/Options.h: Change the default max GC parallelism to 8. I don't know why it was still 7.
+        * runtime/SamplingProfiler.cpp:
+        (JSC::SamplingProfiler::stackTracesAsJSON): This needs to defer GC before grabbing its lock.
+        * runtime/SparseArrayValueMap.cpp: This needed thread safety.
+        (JSC::SparseArrayValueMap::add):
+        (JSC::SparseArrayValueMap::remove):
+        (JSC::SparseArrayValueMap::visitChildren):
+        * runtime/SparseArrayValueMap.h:
+        * runtime/Structure.cpp: This had a race between addNewPropertyTransition and visitChildren.
+        (JSC::Structure::Structure):
+        (JSC::Structure::materializePropertyTable):
+        (JSC::Structure::addNewPropertyTransition):
+        (JSC::Structure::flattenDictionaryStructure):
+        (JSC::Structure::add): Help out with nuking support - the m_offset needs to play along.
+        (JSC::Structure::visitChildren):
+        * runtime/Structure.h: Make some useful things public - like the notion of a lastOffset.
+        * runtime/StructureChain.cpp:
+        (JSC::StructureChain::visitChildren): Thread safety!
+        * runtime/StructureChain.h: Thread safety!
+        * runtime/StructureIDTable.cpp:
+        (JSC::StructureIDTable::allocateID): Ensure that we don't get nuked IDs.
+        * runtime/StructureIDTable.h: Add the notion of a nuked ID! It's a bit that the runtime never sees except during specific shady actions like locked structure-butterfly transitions. "Nuking" tells the GC to steer clear and rescan once we fire the barrier. (A sketch of these helpers appears after this entry's file list.)
+        (JSC::nukedStructureIDBit):
+        (JSC::nuke):
+        (JSC::isNuked):
+        (JSC::decontaminate):
+        * runtime/StructureInlines.h:
+        (JSC::Structure::hasIndexingHeader): Better API.
+        (JSC::Structure::add):
+        * runtime/VM.cpp: Better GC interaction.
+        (JSC::VM::ensureWatchdog):
+        (JSC::VM::deleteAllLinkedCode):
+        (JSC::VM::deleteAllCode):
+        * runtime/VM.h:
+        (JSC::VM::getStructure): Why wasn't this always an API!
+        * runtime/WebAssemblyExecutable.cpp:
+        (JSC::WebAssemblyExecutable::visitChildren): Thread safety.
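+
+        A minimal sketch of what the nuked-ID helpers above amount to (using
+        the high bit of the 32-bit StructureID as the nuke bit; the exact
+        constant is an assumption, not the verbatim patch):
+
+            using StructureID = uint32_t; // a 32-bit index into the StructureIDTable
+
+            inline StructureID nukedStructureIDBit() { return 0x80000000u; }
+            inline StructureID nuke(StructureID id) { return id | nukedStructureIDBit(); }
+            inline bool isNuked(StructureID id) { return !!(id & nukedStructureIDBit()); }
+            inline StructureID decontaminate(StructureID id) { return id & ~nukedStructureIDBit(); }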
+
+2016-12-08  Filip Pizlo  
+
+        Enable SharedArrayBuffer, remove the flag
+        https://bugs.webkit.org/show_bug.cgi?id=165614
+
+        Rubber stamped by Geoffrey Garen.
+
+        * runtime/JSGlobalObject.cpp:
+        (JSC::JSGlobalObject::init):
+        * runtime/RuntimeFlags.h:
+
+2016-12-08  JF Bastien  
+
+        WebAssembly JS API: wire up Instance imports
+        https://bugs.webkit.org/show_bug.cgi?id=165118
+
+        Reviewed by Saam Barati.
+
+        Change a bunch of the WebAssembly object model, and pipe the
+        necessary changes to be able to call JS imports from
+        WebAssembly. This will make it easier to call_indirect, and
+        unblock many other missing features.
+
+        As a follow-up I need to teach JSC::linkFor to live without a
+        CodeBlock: wasm doesn't have one and the IC patching is sad. We'll
+        switch on the callee (or its type?) and then use that as the owner
+        (because the callee is alive if the instance is alive, ditto
+        module, and module owns the CallLinkInfo).
+
+        * CMakeLists.txt:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * interpreter/CallFrame.h:
+        (JSC::ExecState::callee): give access to the callee as a JSCell
+        * jit/RegisterSet.cpp: dead code from previous WebAssembly implementation
+        * jsc.cpp:
+        (callWasmFunction):
+        (functionTestWasmModuleFunctions):
+        * runtime/JSCellInlines.h:
+        (JSC::ExecState::vm): check callee instead of jsCallee: wasm only has a JSCell and not a JSObject
+        * runtime/VM.cpp:
+        (JSC::VM::VM): store the "top" WebAssembly.Instance on entry to WebAssembly (and restore the previous one on exit)
+        * runtime/VM.h:
+        * testWasm.cpp:
+        (runWasmTests):
+        * wasm/JSWebAssembly.h:
+        * wasm/WasmB3IRGenerator.cpp:
+        (JSC::Wasm::B3IRGenerator::B3IRGenerator): pass unlinked calls around to shorten their lifetime: they're only needed until the Plan is done
+        (JSC::Wasm::B3IRGenerator::addCall):
+        (JSC::Wasm::createJSToWasmWrapper):
+        (JSC::Wasm::parseAndCompile): also pass in the function index space, so that imports can be signature-checked along with internal functions
+        * wasm/WasmB3IRGenerator.h:
+        * wasm/WasmBinding.cpp: Added.
+        (JSC::Wasm::importStubGenerator): stubs from wasm to JS
+        * wasm/WasmBinding.h: Copied from Source/JavaScriptCore/wasm/WasmValidate.h.
+        * wasm/WasmCallingConvention.h:
+        (JSC::Wasm::CallingConvention::setupFrameInPrologue):
+        * wasm/WasmFormat.h: fix the object model
+        (JSC::Wasm::CallableFunction::CallableFunction):
+        * wasm/WasmFunctionParser.h: simplify some of the failure condition checks
+        (JSC::Wasm::FunctionParser::FunctionParser): need function index space, not just internal functions
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/WasmModuleParser.cpp: early-create some of the structures which will be needed later
+        (JSC::Wasm::ModuleParser::parseImport):
+        (JSC::Wasm::ModuleParser::parseFunction):
+        (JSC::Wasm::ModuleParser::parseMemory):
+        (JSC::Wasm::ModuleParser::parseExport):
+        (JSC::Wasm::ModuleParser::parseCode):
+        * wasm/WasmModuleParser.h:
+        (JSC::Wasm::ModuleParser::functionIndexSpace):
+        (JSC::Wasm::ModuleParser::functionLocations):
+        * wasm/WasmParser.h:
+        (JSC::Wasm::Parser::consumeUTF8String):
+        * wasm/WasmPlan.cpp: pass around the wasm objects at the right time, reducing their lifetime and making it easier to pass them around when needed
+        (JSC::Wasm::Plan::run):
+        (JSC::Wasm::Plan::initializeCallees):
+        * wasm/WasmPlan.h:
+        (JSC::Wasm::Plan::exports):
+        (JSC::Wasm::Plan::internalFunctionCount):
+        (JSC::Wasm::Plan::jsToWasmEntryPointForFunction):
+        (JSC::Wasm::Plan::takeModuleInformation):
+        (JSC::Wasm::Plan::takeCallLinkInfos):
+        (JSC::Wasm::Plan::takeWasmToJSStubs):
+        (JSC::Wasm::Plan::takeFunctionIndexSpace):
+        * wasm/WasmValidate.cpp: check function index space instead of only internal functions
+        (JSC::Wasm::Validate::addCall):
+        (JSC::Wasm::validateFunction):
+        * wasm/WasmValidate.h:
+        * wasm/js/JSWebAssemblyCallee.cpp:
+        (JSC::JSWebAssemblyCallee::finishCreation):
+        * wasm/js/JSWebAssemblyCallee.h:
+        (JSC::JSWebAssemblyCallee::create):
+        (JSC::JSWebAssemblyCallee::jsToWasmEntryPoint):
+        * wasm/js/JSWebAssemblyInstance.cpp:
+        (JSC::JSWebAssemblyInstance::create):
+        (JSC::JSWebAssemblyInstance::JSWebAssemblyInstance):
+        (JSC::JSWebAssemblyInstance::visitChildren):
+        * wasm/js/JSWebAssemblyInstance.h: hold the import functions off the end of the Instance
+        (JSC::JSWebAssemblyInstance::importFunction):
+        (JSC::JSWebAssemblyInstance::importFunctions):
+        (JSC::JSWebAssemblyInstance::setImportFunction):
+        (JSC::JSWebAssemblyInstance::offsetOfImportFunctions):
+        (JSC::JSWebAssemblyInstance::offsetOfImportFunction):
+        (JSC::JSWebAssemblyInstance::allocationSize):
+        * wasm/js/JSWebAssemblyModule.cpp:
+        (JSC::JSWebAssemblyModule::create):
+        (JSC::JSWebAssemblyModule::JSWebAssemblyModule):
+        (JSC::JSWebAssemblyModule::visitChildren):
+        * wasm/js/JSWebAssemblyModule.h: hold the link call info, the import function stubs, and the function index space
+        (JSC::JSWebAssemblyModule::signatureForFunctionIndexSpace):
+        (JSC::JSWebAssemblyModule::importCount):
+        (JSC::JSWebAssemblyModule::calleeFromFunctionIndexSpace):
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::callWebAssemblyFunction): set top Instance on VM
+        * wasm/js/WebAssemblyFunction.h:
+        (JSC::WebAssemblyFunction::instance):
+        * wasm/js/WebAssemblyInstanceConstructor.cpp:
+        (JSC::constructJSWebAssemblyInstance): handle function imports
+        * wasm/js/WebAssemblyModuleConstructor.cpp:
+        (JSC::constructJSWebAssemblyModule): generate the stubs for import functions
+        * wasm/js/WebAssemblyModuleRecord.cpp:
+        (JSC::WebAssemblyModuleRecord::link):
+        * wasm/js/WebAssemblyToJSCallee.cpp: Copied from Source/JavaScriptCore/wasm/js/JSWebAssemblyCallee.cpp.
+        (JSC::WebAssemblyToJSCallee::create): dummy JSCell singleton which lives on the VM, and is put as the callee in the import stub's frame to identify it when unwinding
+        (JSC::WebAssemblyToJSCallee::createStructure):
+        (JSC::WebAssemblyToJSCallee::WebAssemblyToJSCallee):
+        (JSC::WebAssemblyToJSCallee::finishCreation):
+        (JSC::WebAssemblyToJSCallee::destroy):
+        * wasm/js/WebAssemblyToJSCallee.h: Copied from Source/JavaScriptCore/wasm/WasmB3IRGenerator.h.
+
+2016-12-08  Mark Lam  
+
+        Enable JSC restricted options by default in the jsc shell.
+        https://bugs.webkit.org/show_bug.cgi?id=165615
+
+        Reviewed by Keith Miller.
+
+        The jsc shell is only used for debugging and development testing.  We should
+        allow it to use restricted options like JSC_useDollarVM even for release builds.
+
+        * jsc.cpp:
+        (jscmain):
+        * runtime/Options.cpp:
+        (JSC::Options::enableRestrictedOptions):
+        (JSC::Options::isAvailable):
+        (JSC::allowRestrictedOptions): Deleted.
+        * runtime/Options.h:
+
+2016-12-08  Chris Dumez  
+
+        Unreviewed, rolling out r209489.
+
+        Likely caused large regressions on JetStream, Sunspider and
+        Speedometer
+
+        Reverted changeset:
+
+        "Add system trace points for JavaScript VM entry/exit"
+        https://bugs.webkit.org/show_bug.cgi?id=165550
+        http://trac.webkit.org/changeset/209489
+
+2016-12-08  Keith Miller  
+
+        Move LEB tests to API tests
+        https://bugs.webkit.org/show_bug.cgi?id=165586
+
+        Reviewed by Saam Barati.
+
+        Delete old stuff.
+
+        * testWasm.cpp:
+        (printUsageStatement):
+        (CommandLine::parseArguments):
+        (main):
+        (runLEBTests): Deleted.
+
+2016-12-07  JF Bastien  
+
+        Cleanup WebAssembly's RETURN_IF_EXCEPTION
+        https://bugs.webkit.org/show_bug.cgi?id=165595
+
+        Reviewed by Filip Pizlo.
+
+        * wasm/js/WebAssemblyCompileErrorConstructor.cpp:
+        (JSC::constructJSWebAssemblyCompileError):
+        * wasm/js/WebAssemblyFunction.cpp:
+        (JSC::callWebAssemblyFunction):
+        * wasm/js/WebAssemblyRuntimeErrorConstructor.cpp:
+        (JSC::constructJSWebAssemblyRuntimeError):
+
+2016-12-07  Geoffrey Garen  
+
+        Renamed SourceCode members to match their accessor names
+        https://bugs.webkit.org/show_bug.cgi?id=165573
+
+        Reviewed by Keith Miller.
+
+        startChar => startOffset
+        endChar => endOffset
+
+        * parser/UnlinkedSourceCode.h:
+        (JSC::UnlinkedSourceCode::UnlinkedSourceCode):
+        (JSC::UnlinkedSourceCode::view):
+        (JSC::UnlinkedSourceCode::startOffset):
+        (JSC::UnlinkedSourceCode::endOffset):
+        (JSC::UnlinkedSourceCode::length):
+
+2016-12-07  Keith Miller  
+
+        Add more missing trivial wasm ops.
+        https://bugs.webkit.org/show_bug.cgi?id=165564
+
+        Reviewed by Geoffrey Garen.
+
+        This patch adds the nop, drop, and tee_local opcodes.
+        It also fixes an issue where we were not generating
+        the proper enums for the grow_memory and current_memory
+        opcodes.
+
+        * wasm/WasmFunctionParser.h:
+        (JSC::Wasm::FunctionParser::parseExpression):
+        * wasm/generateWasmOpsHeader.py:
+
+2016-12-07  Geoffrey Garen  
+
+        Renamed source => parentSource
+        https://bugs.webkit.org/show_bug.cgi?id=165570
+
+        Reviewed by Keith Miller.
+
+        For less confusion.
+
+        * bytecode/UnlinkedFunctionExecutable.cpp:
+        (JSC::UnlinkedFunctionExecutable::UnlinkedFunctionExecutable):
+
+2016-12-07  Yusuke Suzuki  
+
+        [JSC] Drop translate phase in module loader
+        https://bugs.webkit.org/show_bug.cgi?id=164861
+
+        Reviewed by Saam Barati.
+
+        Originally, this "translate" phase was introduced to the module loader.
+        However, the recent loader rework discussion[1] drops this phase.
+        The "translate" phase is also meaningless in a browser-side module
+        loader, since it originally mimicked node.js's translation hook (for
+        example, transpiling CoffeeScript source to JavaScript).
+
+        This "translate" phase is not necessary for the exposed HTML5
+        
+
+import cssmin
+import jsmin
+import os.path
+import re
+import sys
+
+
+def main(argv):
+
+    if len(argv) < 3:
+        print('usage: %s inputFile outputFile' % argv[0])
+        return 1
+
+    inputFileName = argv[1]
+    outputFileName = argv[2]
+    importsDir = os.path.dirname(inputFileName)
+
+    inputFile = open(inputFileName, 'r')
+    inputContent = inputFile.read()
+    inputFile.close()
+
+    def inline(match, minifier, prefix, postfix):
+        importFileName = match.group(1)
+        fullPath = os.path.join(importsDir, importFileName)
+        if not os.access(fullPath, os.F_OK):
+            raise Exception('File %s referenced in %s not found' % (importFileName, inputFileName))
+        importFile = open(fullPath, 'r')
+        importContent = minifier(importFile.read())
+        importFile.close()
+        return '%s%s%s' % (prefix, importContent, postfix)
+
+    def inlineStylesheet(match):
+        return inline(match, cssmin.cssminify, "<style>", "</style>")
+
+    def inlineScript(match):
+        return inline(match, jsmin.jsmin, "<script>", "</script>")
+
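+    # The substitutions below inline external resources referenced by markup of
+    # this shape (illustrative):
+    #     <link rel="stylesheet" href="foo.css">   becomes an inline, minified <style> block
+    #     <script src="foo.js"></script>           becomes an inline, minified <script> block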
+    outputContent = re.sub(r'<link rel="stylesheet" href="([^"]+)">', inlineStylesheet, inputContent)
+    outputContent = re.sub(r'<script src="([^"]+)"></script>', inlineScript, outputContent)
+
+    outputFile = open(outputFileName, 'w')
+    outputFile.write(outputContent)
+    outputFile.close()
+
+    # Touch output file directory to make sure that Xcode will copy
+    # modified resource files.
+    if sys.platform == 'darwin':
+        outputDirName = os.path.dirname(outputFileName)
+        os.utime(outputDirName, None)
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/Scripts/jsmin.py b/Scripts/jsmin.py
new file mode 100644
index 0000000..372418b
--- /dev/null
+++ b/Scripts/jsmin.py
@@ -0,0 +1,238 @@
+# This code is original from jsmin by Douglas Crockford, it was translated to
+# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
+#
+# The MIT License (MIT)
+#
+# Copyright (c) 2013 Dave St.Germain
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+
+import sys
+is_3 = sys.version_info >= (3, 0)
+if is_3:
+    import io
+else:
+    import StringIO
+    try:
+        import cStringIO
+    except ImportError:
+        cStringIO = None
+
+
+__all__ = ['jsmin', 'JavascriptMinify']
+__version__ = '2.0.9'
+
+
+def jsmin(js):
+    """
+    returns a minified version of the javascript string
+    """
+    if not is_3:
+        if cStringIO and not isinstance(js, unicode):
+            # strings can use cStringIO for a 3x performance
+            # improvement, but unicode (in python2) cannot
+            klass = cStringIO.StringIO
+        else:
+            klass = StringIO.StringIO
+    else:
+        klass = io.StringIO
+    ins = klass(js)
+    outs = klass()
+    JavascriptMinify(ins, outs).minify()
+    return outs.getvalue()
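+
+# Example usage (illustrative; not part of the original module):
+#     from jsmin import jsmin
+#     minified = jsmin(open('input.js').read())
+#     open('input.min.js', 'w').write(minified)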
+
+
+class JavascriptMinify(object):
+    """
+    Minify an input stream of javascript, writing
+    to an output stream
+    """
+
+    def __init__(self, instream=None, outstream=None):
+        self.ins = instream
+        self.outs = outstream
+
+    def minify(self, instream=None, outstream=None):
+        if instream and outstream:
+            self.ins, self.outs = instream, outstream
+
+        self.is_return = False
+        self.return_buf = ''
+
+        def write(char):
+            # all of this is to support literal regular expressions.
+            # sigh
+            if char in 'return':
+                self.return_buf += char
+                self.is_return = self.return_buf == 'return'
+            self.outs.write(char)
+            if self.is_return:
+                self.return_buf = ''
+
+        read = self.ins.read
+
+        space_strings = "abcdefghijklmnopqrstuvwxyz"\
+        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\"
+        starters, enders = '{[(+-', '}])+-"\''
+        newlinestart_strings = starters + space_strings
+        newlineend_strings = enders + space_strings
+        do_newline = False
+        do_space = False
+        escape_slash_count = 0
+        doing_single_comment = False
+        previous_before_comment = ''
+        doing_multi_comment = False
+        in_re = False
+        in_quote = ''
+        quote_buf = []
+
+        previous = read(1)
+        if previous == '\\':
+            escape_slash_count += 1
+        next1 = read(1)
+        if previous == '/':
+            if next1 == '/':
+                doing_single_comment = True
+            elif next1 == '*':
+                doing_multi_comment = True
+                previous = next1
+                next1 = read(1)
+            else:
+                write(previous)
+        elif not previous:
+            return
+        elif previous >= '!':
+            if previous in "'\"":
+                in_quote = previous
+            write(previous)
+            previous_non_space = previous
+        else:
+            previous_non_space = ' '
+        if not next1:
+            return
+
+        while 1:
+            next2 = read(1)
+            if not next2:
+                last = next1.strip()
+                if not (doing_single_comment or doing_multi_comment)\
+                    and last not in ('', '/'):
+                    if in_quote:
+                        write(''.join(quote_buf))
+                    write(last)
+                break
+            if doing_multi_comment:
+                if next1 == '*' and next2 == '/':
+                    doing_multi_comment = False
+                    next2 = read(1)
+            elif doing_single_comment:
+                if next1 in '\r\n':
+                    doing_single_comment = False
+                    while next2 in '\r\n':
+                        next2 = read(1)
+                        if not next2:
+                            break
+                    if previous_before_comment in ')}]':
+                        do_newline = True
+                    elif previous_before_comment in space_strings:
+                        write('\n')
+            elif in_quote:
+                quote_buf.append(next1)
+
+                if next1 == in_quote:
+                    numslashes = 0
+                    for c in reversed(quote_buf[:-1]):
+                        if c != '\\':
+                            break
+                        else:
+                            numslashes += 1
+                    if numslashes % 2 == 0:
+                        in_quote = ''
+                        write(''.join(quote_buf))
+            elif next1 in '\r\n':
+                if previous_non_space in newlineend_strings \
+                    or previous_non_space > '~':
+                    while 1:
+                        if next2 < '!':
+                            next2 = read(1)
+                            if not next2:
+                                break
+                        else:
+                            if next2 in newlinestart_strings \
+                                or next2 > '~' or next2 == '/':
+                                do_newline = True
+                            break
+            elif next1 < '!' and not in_re:
+                if (previous_non_space in space_strings \
+                    or previous_non_space > '~') \
+                    and (next2 in space_strings or next2 > '~'):
+                    do_space = True
+                elif previous_non_space in '-+' and next2 == previous_non_space:
+                    # protect against + ++ or - -- sequences
+                    do_space = True
+                elif self.is_return and next2 == '/':
+                    # returning a regex...
+                    write(' ')
+            elif next1 == '/':
+                if do_space:
+                    write(' ')
+                if in_re:
+                    if previous != '\\' or (not escape_slash_count % 2) or next2 in 'gimy':
+                        in_re = False
+                    write('/')
+                elif next2 == '/':
+                    doing_single_comment = True
+                    previous_before_comment = previous_non_space
+                elif next2 == '*':
+                    doing_multi_comment = True
+                    previous = next1
+                    next1 = next2
+                    next2 = read(1)
+                else:
+                    in_re = previous_non_space in '(,=:[?!&|' or self.is_return  # literal regular expression
+                    write('/')
+            else:
+                if do_space:
+                    do_space = False
+                    write(' ')
+                if do_newline:
+                    write('\n')
+                    do_newline = False
+
+                write(next1)
+                if not in_re and next1 in "'\"`":
+                    in_quote = next1
+                    quote_buf = []
+
+            previous = next1
+            next1 = next2
+
+            if previous >= '!':
+                previous_non_space = previous
+
+            if previous == '\\':
+                escape_slash_count += 1
+            else:
+                escape_slash_count = 0
+
+if __name__ == '__main__':
+    minifier = JavascriptMinify(sys.stdin, sys.stdout)
+    minifier.minify()
+    sys.stdout.write('\n')
diff --git a/Scripts/lazywriter.py b/Scripts/lazywriter.py
new file mode 100644
index 0000000..f93a2c6
--- /dev/null
+++ b/Scripts/lazywriter.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 Apple Inc. All rights reserved.
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# A writer that only updates file if it actually changed.
+
+
+class LazyFileWriter:
+    def __init__(self, filepath, force_output):
+        self._filepath = filepath
+        self._output = ""
+        self.force_output = force_output
+
+    def write(self, text):
+        self._output += text
+
+    def close(self):
+        text_changed = True
+        self._output = self._output.rstrip() + "\n"
+
+        try:
+            if self.force_output:
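+                # With no active exception, this bare raise throws and is caught by
+                # the bare except below, skipping the comparison so the file is
+                # always rewritten when force_output is set.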
+                raise
+
+            read_file = open(self._filepath, "r")
+            old_text = read_file.read()
+            read_file.close()
+            text_changed = old_text != self._output
+        except:
+            # Ignore, just overwrite by default
+            pass
+
+        if text_changed or self.force_output:
+            out_file = open(self._filepath, "w")
+            out_file.write(self._output)
+            out_file.close()
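+
+# Example usage (illustrative; the file name and text are hypothetical):
+#     writer = LazyFileWriter("GeneratedHeader.h", force_output=False)
+#     writer.write(generated_text)
+#     writer.close()  # rewrites GeneratedHeader.h only when the content changed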
diff --git a/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js b/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js
new file mode 100644
index 0000000..b45d81c
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Combined.js
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function rejectPromise(promise, reason)
+{
+    "use strict";
+
+    var reactions = promise.@promiseRejectReactions;
+    promise.@promiseResult = reason;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseRejected;
+
+    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);
+
+    @triggerPromiseReactions(reactions, reason);
+}
+
+function fulfillPromise(promise, value)
+{
+    "use strict";
+
+    var reactions = promise.@promiseFulfillReactions;
+    promise.@promiseResult = value;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseFulfilled;
+
+    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);
+
+    @triggerPromiseReactions(reactions, value);
+}
diff --git a/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js b/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js
new file mode 100644
index 0000000..b45d81c
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-Builtin.Promise-Separate.js
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function rejectPromise(promise, reason)
+{
+    "use strict";
+
+    var reactions = promise.@promiseRejectReactions;
+    promise.@promiseResult = reason;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseRejected;
+
+    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);
+
+    @triggerPromiseReactions(reactions, reason);
+}
+
+function fulfillPromise(promise, value)
+{
+    "use strict";
+
+    var reactions = promise.@promiseFulfillReactions;
+    promise.@promiseResult = value;
+    promise.@promiseFulfillReactions = undefined;
+    promise.@promiseRejectReactions = undefined;
+    promise.@promiseState = @promiseFulfilled;
+
+    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);
+
+    @triggerPromiseReactions(reactions, value);
+}
diff --git a/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js b/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js
new file mode 100644
index 0000000..5448b98
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Combined.js
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.every requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.every requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.every callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (!callback.@call(thisArg, array[i], i, array))
+            return false;
+    }
+    
+    return true;
+}
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.forEach callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (i in array)
+            callback.@call(thisArg, array[i], i, array);
+    }
+}
diff --git a/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js b/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js
new file mode 100644
index 0000000..5448b98
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-Builtin.prototype-Separate.js
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.every requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.every requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.every callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (!callback.@call(thisArg, array[i], i, array))
+            return false;
+    }
+    
+    return true;
+}
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this === null)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be null");
+    
+    if (this === undefined)
+        throw new @TypeError("Array.prototype.forEach requires that |this| not be undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        throw new @TypeError("Array.prototype.forEach callback must be a function");
+    
+    var thisArg = arguments.length > 1 ? arguments[1] : undefined;
+    
+    for (var i = 0; i < length; i++) {
+        if (i in array)
+            callback.@call(thisArg, array[i], i, array);
+    }
+}
diff --git a/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js b/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js
new file mode 100644
index 0000000..9e8c1b4
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Combined.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function of(/* items... */)
+{
+    "use strict";
+
+    var length = arguments.length;
+    // TODO: Need isConstructor(this) instead of typeof "function" check.
+    var array = typeof this === 'function' ? new this(length) : new @Array(length);
+    for (var k = 0; k < length; ++k)
+        @putByValDirect(array, k, arguments[k]);
+    array.length = length;
+    return array;
+}
+
+function from(items /*, mapFn, thisArg */)
+{
+    "use strict";
+
+    var thisObj = this;
+
+    var mapFn = arguments.length > 1 ? arguments[1] : undefined;
+
+    var thisArg;
+
+    if (mapFn !== undefined) {
+        if (typeof mapFn !== "function")
+            throw new @TypeError("Array.from requires that the second argument, when provided, be a function");
+
+        if (arguments.length > 2)
+            thisArg = arguments[2];
+    }
+
+    if (items == null)
+        throw new @TypeError("Array.from requires an array-like object - not null or undefined");
+
+    var iteratorMethod = items[@symbolIterator];
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            throw new @TypeError("Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function");
+
+        // TODO: Need isConstructor(thisObj) instead of typeof "function" check.
+        var result = (typeof thisObj === "function") ? @Object(new thisObj()) : [];
+
+        var k = 0;
+        var iterator = iteratorMethod.@call(items);
+
+        // Since for-of loop once more looks up the @@iterator property of a given iterable,
+        // it could be observable if the user defines a getter for @@iterator.
+        // To avoid this situation, we define a wrapper object that @@iterator just returns a given iterator.
+        var wrapper = {
+            [@symbolIterator]() {
+                return iterator;
+            }
+        };
+
+        for (var value of wrapper) {
+            if (mapFn)
+                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(result, k, value);
+            k += 1;
+        }
+
+        result.length = k;
+        return result;
+    }
+
+    var arrayLike = @Object(items);
+    var arrayLikeLength = @toLength(arrayLike.length);
+
+    // TODO: Need isConstructor(thisObj) instead of typeof "function" check.
+    var result = (typeof thisObj === "function") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);
+
+    var k = 0;
+    while (k < arrayLikeLength) {
+        var value = arrayLike[k];
+        if (mapFn)
+            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+        else
+            @putByValDirect(result, k, value);
+        k += 1;
+    }
+
+    result.length = arrayLikeLength;
+    return result;
+}
diff --git a/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js b/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js
new file mode 100644
index 0000000..9e8c1b4
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-BuiltinConstructor-Separate.js
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function of(/* items... */)
+{
+    "use strict";
+
+    var length = arguments.length;
+    // TODO: Need isConstructor(this) instead of typeof "function" check.
+    var array = typeof this === 'function' ? new this(length) : new @Array(length);
+    for (var k = 0; k < length; ++k)
+        @putByValDirect(array, k, arguments[k]);
+    array.length = length;
+    return array;
+}
+
+function from(items /*, mapFn, thisArg */)
+{
+    "use strict";
+
+    var thisObj = this;
+
+    var mapFn = arguments.length > 1 ? arguments[1] : undefined;
+
+    var thisArg;
+
+    if (mapFn !== undefined) {
+        if (typeof mapFn !== "function")
+            throw new @TypeError("Array.from requires that the second argument, when provided, be a function");
+
+        if (arguments.length > 2)
+            thisArg = arguments[2];
+    }
+
+    if (items == null)
+        throw new @TypeError("Array.from requires an array-like object - not null or undefined");
+
+    var iteratorMethod = items[@symbolIterator];
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            throw new @TypeError("Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function");
+
+        // FIXME: Need isConstructor(thisObj) instead of typeof "function" check.
+        var result = (typeof thisObj === "function") ? @Object(new thisObj()) : [];
+
+        var k = 0;
+        var iterator = iteratorMethod.@call(items);
+
+        // Since for-of loop once more looks up the @@iterator property of a given iterable,
+        // it could be observable if the user defines a getter for @@iterator.
+        // To avoid this situation, we define a wrapper object whose @@iterator method just returns the given iterator.
+        var wrapper = {
+            [@symbolIterator]() {
+                return iterator;
+            }
+        };
+
+        for (var value of wrapper) {
+            if (mapFn)
+                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(result, k, value);
+            k += 1;
+        }
+
+        result.length = k;
+        return result;
+    }
+
+    var arrayLike = @Object(items);
+    var arrayLikeLength = @toLength(arrayLike.length);
+
+    // FIXME: Need isConstructor(thisObj) instead of typeof "function" check.
+    var result = (typeof thisObj === "function") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);
+
+    var k = 0;
+    while (k < arrayLikeLength) {
+        var value = arrayLike[k];
+        if (mapFn)
+            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+        else
+            @putByValDirect(result, k, value);
+        k += 1;
+    }
+
+    result.length = arrayLikeLength;
+    return result;
+}
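The comments above note that typeof thisObj === "function" is only an approximation of a real isConstructor check. A minimal standalone sketch (illustrative, not taken from the patch) of the cases the approximation misses:

    // Everything below reports typeof === "function", but only the class is constructible.
    class Point {}
    const arrow = () => {};
    const method = ({ m() {} }).m;

    for (const candidate of [Point, arrow, method]) {
        try {
            new candidate();
            console.log(candidate.name + ": constructible");
        } catch (error) {
            console.log(candidate.name + ": not constructible (" + error.constructor.name + ")");
        }
    }
    // Point: constructible
    // arrow: not constructible (TypeError)
    // m: not constructible (TypeError)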
diff --git a/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js b/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js
new file mode 100644
index 0000000..0a436cf
--- /dev/null
+++ b/Scripts/tests/builtins/JavaScriptCore-InternalClashingNames-Combined.js
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CANON INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL CANON INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
+
+// Testing clashing names (emulating function with same names in different files)
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js b/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js
new file mode 100644
index 0000000..c5fae3f
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-AnotherGuardedInternalBuiltin-Separate.js
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(FETCH_API)
+// @internal
+
+function letsFetch()
+{
+   "use strict";
+
+    return @fetchRequest(new @Request("yes"));
+}
diff --git a/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js b/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js
new file mode 100644
index 0000000..c808b3c
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-ArbitraryConditionalGuard-Separate.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API) || USE(CF)
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js b/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js
new file mode 100644
index 0000000..73e7c71
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-DuplicateFlagAnnotation-Separate.js
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+// @internal
diff --git a/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js b/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js
new file mode 100644
index 0000000..6d6fe60
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-DuplicateKeyValueAnnotation-Separate.js
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API)
+// @conditional=USE(CF)
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js b/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js
new file mode 100644
index 0000000..2acec58
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-GuardedBuiltin-Separate.js
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API)
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js b/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js
new file mode 100644
index 0000000..e95e0c2
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-GuardedInternalBuiltin-Separate.js
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+// @internal
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js b/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js
new file mode 100644
index 0000000..9647f2b
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-UnguardedBuiltin-Separate.js
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function isReadableStreamLocked(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
diff --git a/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js b/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js
new file mode 100644
index 0000000..550c89e
--- /dev/null
+++ b/Scripts/tests/builtins/WebCore-xmlCasingTest-Separate.js
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(STREAMS_API)
+// @internal
+
+function xmlCasingTest(stream)
+{
+   "use strict";
+
+    return !!stream.@reader;
+}
+
+
+function cssCasingTest(stream, reason)
+{
+    "use strict";
+
+    if (stream.@state === @readableStreamClosed)
+        return Promise.resolve();
+    if (stream.@state === @readableStreamErrored)
+        return Promise.reject(stream.@storedError);
+    stream.@queue = [];
+    @finishClosingReadableStream(stream);
+    return @promiseInvokeOrNoop(stream.@underlyingSource, "cancel", [reason]).then(function() { });
+}
+
+
+function urlCasingTest(object, key, args)
+{
+    "use strict";
+
+    try {
+        var method = object[key];
+        if (typeof method === "undefined")
+            return Promise.resolve();
+        var result = method.@apply(object, args);
+        return Promise.resolve(result);
+    }
+    catch(error) {
+        return Promise.reject(error);
+    }
+}
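urlCasingTest above follows the invoke-or-noop shape used by the streams helpers: look the method up, treat a missing method as a resolved no-op, and funnel both return values and thrown errors into a promise. A plain-JavaScript sketch of that pattern (illustrative only; the names below are not from the patch):

    function invokeOrNoop(object, key, args) {
        try {
            const method = object[key];
            if (typeof method === "undefined")
                return Promise.resolve();                       // missing method: resolved no-op
            return Promise.resolve(method.apply(object, args)); // wrap the return value
        } catch (error) {
            return Promise.reject(error);                       // thrown errors become rejections
        }
    }

    const source = { cancel(reason) { return "cancelled: " + reason; } };
    invokeOrNoop(source, "cancel", ["done"]).then(console.log);     // "cancelled: done"
    invokeOrNoop(source, "missing", []).then(v => console.log(v));  // undefined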
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result
new file mode 100644
index 0000000..9bb21d6
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Combined.js-result
@@ -0,0 +1,161 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* Builtin.Promise */
+extern const char* s_builtinPromiseRejectPromiseCode;
+extern const int s_builtinPromiseRejectPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility;
+extern const char* s_builtinPromiseFulfillPromiseCode;
+extern const int s_builtinPromiseFulfillPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINPROMISE_BUILTIN_DATA(macro) \
+    macro(rejectPromise, builtinPromiseRejectPromise, 2) \
+    macro(fulfillPromise, builtinPromiseFulfillPromise, 2) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(builtinPromiseRejectPromiseCode, rejectPromise, s_builtinPromiseRejectPromiseCodeLength) \
+    macro(builtinPromiseFulfillPromiseCode, fulfillPromise, s_builtinPromiseFulfillPromiseCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(fulfillPromise) \
+    macro(rejectPromise) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseRejectPromiseCodeLength = 410;
+static const JSC::Intrinsic s_builtinPromiseRejectPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseRejectPromiseCode =
+    "(function (promise, reason)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseRejectReactions;\n" \
+    "    promise.@promiseResult = reason;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseRejected;\n" \
+    "    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, reason);\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseFulfillPromiseCodeLength = 409;
+static const JSC::Intrinsic s_builtinPromiseFulfillPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseFulfillPromiseCode =
+    "(function (promise, value)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseFulfillReactions;\n" \
+    "    promise.@promiseResult = value;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseFulfilled;\n" \
+    "    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, value);\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result
new file mode 100644
index 0000000..87fdaee
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.Promise-Separate.js-result
@@ -0,0 +1,160 @@
+### Begin File: BuiltinPromiseBuiltins.h
+/*
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace JSC {
+
+/* Builtin.Promise */
+extern const char* s_builtinPromiseRejectPromiseCode;
+extern const int s_builtinPromiseRejectPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility;
+extern const char* s_builtinPromiseFulfillPromiseCode;
+extern const int s_builtinPromiseFulfillPromiseCodeLength;
+extern const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTIN_PROMISE_BUILTIN_DATA(macro) \
+    macro(rejectPromise, builtinPromiseRejectPromise, 2) \
+    macro(fulfillPromise, builtinPromiseFulfillPromise, 2) \
+
+#define JSC_BUILTIN_BUILTIN_PROMISE_REJECTPROMISE 1
+#define JSC_BUILTIN_BUILTIN_PROMISE_FULFILLPROMISE 1
+
+#define JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_CODE(macro) \
+    macro(builtinPromiseRejectPromiseCode, rejectPromise, s_builtinPromiseRejectPromiseCodeLength) \
+    macro(builtinPromiseFulfillPromiseCode, fulfillPromise, s_builtinPromiseFulfillPromiseCodeLength) \
+
+#define JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_FUNCTION_NAME(macro) \
+    macro(fulfillPromise) \
+    macro(rejectPromise) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: BuiltinPromiseBuiltins.h
+
+### Begin File: BuiltinPromiseBuiltins.cpp
+/*
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "BuiltinPromiseBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPromiseRejectPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseRejectPromiseCodeLength = 410;
+static const JSC::Intrinsic s_builtinPromiseRejectPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseRejectPromiseCode =
+    "(function (promise, reason)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseRejectReactions;\n" \
+    "    promise.@promiseResult = reason;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseRejected;\n" \
+    "    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, reason);\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPromiseFulfillPromiseCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPromiseFulfillPromiseCodeLength = 409;
+static const JSC::Intrinsic s_builtinPromiseFulfillPromiseCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPromiseFulfillPromiseCode =
+    "(function (promise, value)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var reactions = promise.@promiseFulfillReactions;\n" \
+    "    promise.@promiseResult = value;\n" \
+    "    promise.@promiseFulfillReactions = undefined;\n" \
+    "    promise.@promiseRejectReactions = undefined;\n" \
+    "    promise.@promiseState = @promiseFulfilled;\n" \
+    "    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);\n" \
+    "    @triggerPromiseReactions(reactions, value);\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN.PROMISE_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: BuiltinPromiseBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result
new file mode 100644
index 0000000..6bf696f
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Combined.js-result
@@ -0,0 +1,185 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* Builtin.prototype */
+extern const char* s_builtinPrototypeEveryCode;
+extern const int s_builtinPrototypeEveryCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility;
+extern const char* s_builtinPrototypeForEachCode;
+extern const int s_builtinPrototypeForEachCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINPROTOTYPE_BUILTIN_DATA(macro) \
+    macro(every, builtinPrototypeEvery, 1) \
+    macro(forEach, builtinPrototypeForEach, 1) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(builtinPrototypeEveryCode, every, s_builtinPrototypeEveryCodeLength) \
+    macro(builtinPrototypeForEachCode, forEach, s_builtinPrototypeForEachCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(every) \
+    macro(forEach) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeEveryCodeLength = 760;
+static const JSC::Intrinsic s_builtinPrototypeEveryCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeEveryCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.every callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (!(i in array))\n" \
+    "            continue;\n" \
+    "        if (!callback.@call(thisArg, array[i], i, array))\n" \
+    "            return false;\n" \
+    "    }\n" \
+    "    \n" \
+    "    return true;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeForEachCodeLength = 692;
+static const JSC::Intrinsic s_builtinPrototypeForEachCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeForEachCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (i in array)\n" \
+    "            callback.@call(thisArg, array[i], i, array);\n" \
+    "    }\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result
new file mode 100644
index 0000000..d0c8f26
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-Builtin.prototype-Separate.js-result
@@ -0,0 +1,184 @@
+### Begin File: BuiltinPrototypeBuiltins.h
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace JSC {
+
+/* Builtin.prototype */
+extern const char* s_builtinPrototypeEveryCode;
+extern const int s_builtinPrototypeEveryCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility;
+extern const char* s_builtinPrototypeForEachCode;
+extern const int s_builtinPrototypeForEachCodeLength;
+extern const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTIN_PROTOTYPE_BUILTIN_DATA(macro) \
+    macro(every, builtinPrototypeEvery, 1) \
+    macro(forEach, builtinPrototypeForEach, 1) \
+
+#define JSC_BUILTIN_BUILTIN_PROTOTYPE_EVERY 1
+#define JSC_BUILTIN_BUILTIN_PROTOTYPE_FOREACH 1
+
+#define JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_CODE(macro) \
+    macro(builtinPrototypeEveryCode, every, s_builtinPrototypeEveryCodeLength) \
+    macro(builtinPrototypeForEachCode, forEach, s_builtinPrototypeForEachCodeLength) \
+
+#define JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_FUNCTION_NAME(macro) \
+    macro(every) \
+    macro(forEach) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: BuiltinPrototypeBuiltins.h
+
+### Begin File: BuiltinPrototypeBuiltins.cpp
+/*
+ * Copyright (c) 2014, 2015, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015 Yusuke Suzuki .
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "BuiltinPrototypeBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinPrototypeEveryCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeEveryCodeLength = 760;
+static const JSC::Intrinsic s_builtinPrototypeEveryCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeEveryCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.every requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.every callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (!(i in array))\n" \
+    "            continue;\n" \
+    "        if (!callback.@call(thisArg, array[i], i, array))\n" \
+    "            return false;\n" \
+    "    }\n" \
+    "    \n" \
+    "    return true;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinPrototypeForEachCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinPrototypeForEachCodeLength = 692;
+static const JSC::Intrinsic s_builtinPrototypeForEachCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinPrototypeForEachCode =
+    "(function (callback )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (this === null)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be null\");\n" \
+    "    \n" \
+    "    if (this === undefined)\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach requires that |this| not be undefined\");\n" \
+    "    \n" \
+    "    var array = @Object(this);\n" \
+    "    var length = @toLength(array.length);\n" \
+    "    if (typeof callback !== \"function\")\n" \
+    "        throw new @TypeError(\"Array.prototype.forEach callback must be a function\");\n" \
+    "    \n" \
+    "    var thisArg = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    \n" \
+    "    for (var i = 0; i < length; i++) {\n" \
+    "        if (i in array)\n" \
+    "            callback.@call(thisArg, array[i], i, array);\n" \
+    "    }\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN.PROTOTYPE_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: BuiltinPrototypeBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result
new file mode 100644
index 0000000..023a829
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Combined.js-result
@@ -0,0 +1,198 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* BuiltinConstructor */
+extern const char* s_builtinConstructorOfCode;
+extern const int s_builtinConstructorOfCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility;
+extern const char* s_builtinConstructorFromCode;
+extern const int s_builtinConstructorFromCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_DATA(macro) \
+    macro(of, builtinConstructorOf, 0) \
+    macro(from, builtinConstructorFrom, 1) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(builtinConstructorOfCode, of, s_builtinConstructorOfCodeLength) \
+    macro(builtinConstructorFromCode, from, s_builtinConstructorFromCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(from) \
+    macro(of) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorOfCodeLength = 286;
+static const JSC::Intrinsic s_builtinConstructorOfCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorOfCode =
+    "(function ()\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var length = arguments.length;\n" \
+    "    var array = typeof this === 'function' ? new this(length) : new @Array(length);\n" \
+    "    for (var k = 0; k < length; ++k)\n" \
+    "        @putByValDirect(array, k, arguments[k]);\n" \
+    "    array.length = length;\n" \
+    "    return array;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorFromCodeLength = 1979;
+static const JSC::Intrinsic s_builtinConstructorFromCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorFromCode =
+    "(function (items )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var thisObj = this;\n" \
+    "    var mapFn = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    var thisArg;\n" \
+    "    if (mapFn !== undefined) {\n" \
+    "        if (typeof mapFn !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the second argument, when provided, be a function\");\n" \
+    "        if (arguments.length > 2)\n" \
+    "            thisArg = arguments[2];\n" \
+    "    }\n" \
+    "    if (items == null)\n" \
+    "        throw new @TypeError(\"Array.from requires an array-like object - not null or undefined\");\n" \
+    "    var iteratorMethod = items[@symbolIterator];\n" \
+    "    if (iteratorMethod != null) {\n" \
+    "        if (typeof iteratorMethod !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function\");\n" \
+    "        var result = (typeof thisObj === \"function\") ? @Object(new thisObj()) : [];\n" \
+    "        var k = 0;\n" \
+    "        var iterator = iteratorMethod.@call(items);\n" \
+    "        var wrapper = {\n" \
+    "            [@symbolIterator]() {\n" \
+    "                return iterator;\n" \
+    "            }\n" \
+    "        };\n" \
+    "        for (var value of wrapper) {\n" \
+    "            if (mapFn)\n" \
+    "                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "            else\n" \
+    "                @putByValDirect(result, k, value);\n" \
+    "            k += 1;\n" \
+    "        }\n" \
+    "        result.length = k;\n" \
+    "        return result;\n" \
+    "    }\n" \
+    "    var arrayLike = @Object(items);\n" \
+    "    var arrayLikeLength = @toLength(arrayLike.length);\n" \
+    "    var result = (typeof thisObj === \"function\") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);\n" \
+    "    var k = 0;\n" \
+    "    while (k < arrayLikeLength) {\n" \
+    "        var value = arrayLike[k];\n" \
+    "        if (mapFn)\n" \
+    "            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "        else\n" \
+    "            @putByValDirect(result, k, value);\n" \
+    "        k += 1;\n" \
+    "    }\n" \
+    "    result.length = arrayLikeLength;\n" \
+    "    return result;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result
new file mode 100644
index 0000000..8000b69
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-BuiltinConstructor-Separate.js-result
@@ -0,0 +1,197 @@
+### Begin File: BuiltinConstructorBuiltins.h
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace JSC {
+
+/* BuiltinConstructor */
+extern const char* s_builtinConstructorOfCode;
+extern const int s_builtinConstructorOfCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility;
+extern const char* s_builtinConstructorFromCode;
+extern const int s_builtinConstructorFromCodeLength;
+extern const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility;
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_DATA(macro) \
+    macro(of, builtinConstructorOf, 0) \
+    macro(from, builtinConstructorFrom, 1) \
+
+#define JSC_BUILTIN_BUILTINCONSTRUCTOR_OF 1
+#define JSC_BUILTIN_BUILTINCONSTRUCTOR_FROM 1
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_CODE(macro) \
+    macro(builtinConstructorOfCode, of, s_builtinConstructorOfCodeLength) \
+    macro(builtinConstructorFromCode, from, s_builtinConstructorFromCodeLength) \
+
+#define JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_FUNCTION_NAME(macro) \
+    macro(from) \
+    macro(of) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: BuiltinConstructorBuiltins.h
+
+### Begin File: BuiltinConstructorBuiltins.cpp
+/*
+ * Copyright (c) 2015, 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "BuiltinConstructorBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_builtinConstructorOfCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorOfCodeLength = 286;
+static const JSC::Intrinsic s_builtinConstructorOfCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorOfCode =
+    "(function ()\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var length = arguments.length;\n" \
+    "    var array = typeof this === 'function' ? new this(length) : new @Array(length);\n" \
+    "    for (var k = 0; k < length; ++k)\n" \
+    "        @putByValDirect(array, k, arguments[k]);\n" \
+    "    array.length = length;\n" \
+    "    return array;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_builtinConstructorFromCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_builtinConstructorFromCodeLength = 1979;
+static const JSC::Intrinsic s_builtinConstructorFromCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_builtinConstructorFromCode =
+    "(function (items )\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    var thisObj = this;\n" \
+    "    var mapFn = arguments.length > 1 ? arguments[1] : undefined;\n" \
+    "    var thisArg;\n" \
+    "    if (mapFn !== undefined) {\n" \
+    "        if (typeof mapFn !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the second argument, when provided, be a function\");\n" \
+    "        if (arguments.length > 2)\n" \
+    "            thisArg = arguments[2];\n" \
+    "    }\n" \
+    "    if (items == null)\n" \
+    "        throw new @TypeError(\"Array.from requires an array-like object - not null or undefined\");\n" \
+    "    var iteratorMethod = items[@symbolIterator];\n" \
+    "    if (iteratorMethod != null) {\n" \
+    "        if (typeof iteratorMethod !== \"function\")\n" \
+    "            throw new @TypeError(\"Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function\");\n" \
+    "        var result = (typeof thisObj === \"function\") ? @Object(new thisObj()) : [];\n" \
+    "        var k = 0;\n" \
+    "        var iterator = iteratorMethod.@call(items);\n" \
+    "        var wrapper = {\n" \
+    "            [@symbolIterator]() {\n" \
+    "                return iterator;\n" \
+    "            }\n" \
+    "        };\n" \
+    "        for (var value of wrapper) {\n" \
+    "            if (mapFn)\n" \
+    "                @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "            else\n" \
+    "                @putByValDirect(result, k, value);\n" \
+    "            k += 1;\n" \
+    "        }\n" \
+    "        result.length = k;\n" \
+    "        return result;\n" \
+    "    }\n" \
+    "    var arrayLike = @Object(items);\n" \
+    "    var arrayLikeLength = @toLength(arrayLike.length);\n" \
+    "    var result = (typeof thisObj === \"function\") ? @Object(new thisObj(arrayLikeLength)) : new @Array(arrayLikeLength);\n" \
+    "    var k = 0;\n" \
+    "    while (k < arrayLikeLength) {\n" \
+    "        var value = arrayLike[k];\n" \
+    "        if (mapFn)\n" \
+    "            @putByValDirect(result, k, thisArg === undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));\n" \
+    "        else\n" \
+    "            @putByValDirect(result, k, value);\n" \
+    "        k += 1;\n" \
+    "    }\n" \
+    "    result.length = arrayLikeLength;\n" \
+    "    return result;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTINCONSTRUCTOR_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: BuiltinConstructorBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error b/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error
new file mode 100644
index 0000000..eb147c4
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-error
@@ -0,0 +1 @@
+ERROR: There are several internal functions with the same name. Private identifiers may clash.
diff --git a/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result b/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result
new file mode 100644
index 0000000..8cbb539
--- /dev/null
+++ b/Scripts/tests/builtins/expected/JavaScriptCore-InternalClashingNames-Combined.js-result
@@ -0,0 +1,148 @@
+### Begin File: JSCBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+namespace JSC {
+class FunctionExecutable;
+class VM;
+
+enum class ConstructAbility : unsigned;
+}
+
+namespace JSC {
+
+/* InternalClashingNames */
+extern const char* s_internalClashingNamesIsReadableStreamLockedCode;
+extern const int s_internalClashingNamesIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility;
+extern const char* s_internalClashingNamesIsReadableStreamLockedCode;
+extern const int s_internalClashingNamesIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility;
+
+#define JSC_FOREACH_INTERNALCLASHINGNAMES_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, internalClashingNamesIsReadableStreamLocked, 1) \
+    macro(isReadableStreamLocked, internalClashingNamesIsReadableStreamLocked, 1) \
+
+#define JSC_FOREACH_BUILTIN_CODE(macro) \
+    macro(internalClashingNamesIsReadableStreamLockedCode, isReadableStreamLocked, s_internalClashingNamesIsReadableStreamLockedCodeLength) \
+    macro(internalClashingNamesIsReadableStreamLockedCode, isReadableStreamLocked, s_internalClashingNamesIsReadableStreamLockedCodeLength) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define JSC_FOREACH_BUILTIN_FUNCTION_PRIVATE_GLOBAL_NAME(macro) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+} // namespace JSC
+### End File: JSCBuiltins.h
+
+### Begin File: JSCBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "JSCBuiltins.h"
+
+#include "BuiltinExecutables.h"
+#include "HeapInlines.h"
+#include "Intrinsic.h"
+#include "JSCellInlines.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VM.h"
+
+namespace JSC {
+
+const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_internalClashingNamesIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_internalClashingNamesIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_internalClashingNamesIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_internalClashingNamesIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_internalClashingNamesIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_internalClashingNamesIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_internalClashingNamesIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); }
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace JSC
+### End File: JSCBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result b/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result
new file mode 100644
index 0000000..9eae1be
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-AnotherGuardedInternalBuiltin-Separate.js-result
@@ -0,0 +1,227 @@
+### Begin File: AnotherGuardedInternalBuiltinBuiltins.h
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(FETCH_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* AnotherGuardedInternalBuiltin */
+extern const char* s_anotherGuardedInternalBuiltinLetsFetchCode;
+extern const int s_anotherGuardedInternalBuiltinLetsFetchCodeLength;
+extern const JSC::ConstructAbility s_anotherGuardedInternalBuiltinLetsFetchCodeConstructAbility;
+
+#define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_DATA(macro) \
+    macro(letsFetch, anotherGuardedInternalBuiltinLetsFetch, 0) \
+
+#define WEBCORE_BUILTIN_ANOTHERGUARDEDINTERNALBUILTIN_LETSFETCH 1
+
+#define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(macro) \
+    macro(anotherGuardedInternalBuiltinLetsFetchCode, letsFetch, s_anotherGuardedInternalBuiltinLetsFetchCodeLength) \
+
+#define WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(letsFetch) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class AnotherGuardedInternalBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit AnotherGuardedInternalBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length)))
+        WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* AnotherGuardedInternalBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void AnotherGuardedInternalBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+class AnotherGuardedInternalBuiltinBuiltinFunctions {
+public:
+    explicit AnotherGuardedInternalBuiltinBuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
+
+    void init(JSC::JSGlobalObject&);
+    void visit(JSC::SlotVisitor&);
+
+public:
+    JSC::VM& m_vm;
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \
+    JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+inline void AnotherGuardedInternalBuiltinBuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
+{
+#define EXPORT_FUNCTION(codeName, functionName, length)\
+    m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPORT_FUNCTION)
+#undef EXPORT_FUNCTION
+}
+
+inline void AnotherGuardedInternalBuiltinBuiltinFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
+#undef VISIT_FUNCTION
+}
+
+
+} // namespace WebCore
+
+#endif // ENABLE(FETCH_API)
+### End File: AnotherGuardedInternalBuiltinBuiltins.h
+
+### Begin File: AnotherGuardedInternalBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "AnotherGuardedInternalBuiltinBuiltins.h"
+
+#if ENABLE(FETCH_API)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_anotherGuardedInternalBuiltinLetsFetchCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_anotherGuardedInternalBuiltinLetsFetchCodeLength = 82;
+static const JSC::Intrinsic s_anotherGuardedInternalBuiltinLetsFetchCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_anotherGuardedInternalBuiltinLetsFetchCode =
+    "(function ()\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return @fetchRequest(new @Request(\"yes\"));\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().anotherGuardedInternalBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().anotherGuardedInternalBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(FETCH_API)
+
+### End File: AnotherGuardedInternalBuiltinBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result b/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result
new file mode 100644
index 0000000..f91b6d9
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-ArbitraryConditionalGuard-Separate.js-result
@@ -0,0 +1,197 @@
+### Begin File: ArbitraryConditionalGuardBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(STREAMS_API) || USE(CF)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* ArbitraryConditionalGuard */
+extern const char* s_arbitraryConditionalGuardIsReadableStreamLockedCode;
+extern const int s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, arbitraryConditionalGuardIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_ARBITRARYCONDITIONALGUARD_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(macro) \
+    macro(arbitraryConditionalGuardIsReadableStreamLockedCode, isReadableStreamLocked, s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class ArbitraryConditionalGuardBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit ArbitraryConditionalGuardBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length)))
+        WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* ArbitraryConditionalGuardBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void ArbitraryConditionalGuardBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API) || USE(CF)
+### End File: ArbitraryConditionalGuardBuiltins.h
+
+### Begin File: ArbitraryConditionalGuardBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "ArbitraryConditionalGuardBuiltins.h"
+
+#if ENABLE(STREAMS_API) || USE(CF)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_arbitraryConditionalGuardIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_arbitraryConditionalGuardIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_arbitraryConditionalGuardIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_arbitraryConditionalGuardIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().arbitraryConditionalGuardBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().arbitraryConditionalGuardBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_ARBITRARYCONDITIONALGUARD_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API) || USE(CF)
+
+### End File: ArbitraryConditionalGuardBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error b/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error
new file mode 100644
index 0000000..b15152e
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-DuplicateFlagAnnotation-Separate.js-error
@@ -0,0 +1 @@
+ERROR: Duplicate annotation found: internal
diff --git a/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error b/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error
new file mode 100644
index 0000000..f1b429e
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-DuplicateKeyValueAnnotation-Separate.js-error
@@ -0,0 +1 @@
+ERROR: Duplicate annotation found: conditional
diff --git a/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result b/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result
new file mode 100644
index 0000000..e971755
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-GuardedBuiltin-Separate.js-result
@@ -0,0 +1,197 @@
+### Begin File: GuardedBuiltinBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(STREAMS_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* GuardedBuiltin */
+extern const char* s_guardedBuiltinIsReadableStreamLockedCode;
+extern const int s_guardedBuiltinIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_guardedBuiltinIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, guardedBuiltinIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_GUARDEDBUILTIN_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(macro) \
+    macro(guardedBuiltinIsReadableStreamLockedCode, isReadableStreamLocked, s_guardedBuiltinIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class GuardedBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit GuardedBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length)))
+        WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* GuardedBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void GuardedBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+### End File: GuardedBuiltinBuiltins.h
+
+### Begin File: GuardedBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "GuardedBuiltinBuiltins.h"
+
+#if ENABLE(STREAMS_API)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_guardedBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_guardedBuiltinIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_guardedBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_guardedBuiltinIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().guardedBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().guardedBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_GUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+
+### End File: GuardedBuiltinBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result b/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result
new file mode 100644
index 0000000..7bc645a
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-GuardedInternalBuiltin-Separate.js-result
@@ -0,0 +1,229 @@
+### Begin File: GuardedInternalBuiltinBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* GuardedInternalBuiltin */
+extern const char* s_guardedInternalBuiltinIsReadableStreamLockedCode;
+extern const int s_guardedInternalBuiltinIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, guardedInternalBuiltinIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_GUARDEDINTERNALBUILTIN_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(macro) \
+    macro(guardedInternalBuiltinIsReadableStreamLockedCode, isReadableStreamLocked, s_guardedInternalBuiltinIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class GuardedInternalBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit GuardedInternalBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length)))
+        WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* GuardedInternalBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void GuardedInternalBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+class GuardedInternalBuiltinBuiltinFunctions {
+public:
+    explicit GuardedInternalBuiltinBuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
+
+    void init(JSC::JSGlobalObject&);
+    void visit(JSC::SlotVisitor&);
+
+public:
+    JSC::VM& m_vm;
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \
+    JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+inline void GuardedInternalBuiltinBuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
+{
+#define EXPORT_FUNCTION(codeName, functionName, length)\
+    m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(EXPORT_FUNCTION)
+#undef EXPORT_FUNCTION
+}
+
+inline void GuardedInternalBuiltinBuiltinFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
+#undef VISIT_FUNCTION
+}
+
+
+} // namespace WebCore
+
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+### End File: GuardedInternalBuiltinBuiltins.h
+
+### Begin File: GuardedInternalBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "GuardedInternalBuiltinBuiltins.h"
+
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_guardedInternalBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_guardedInternalBuiltinIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_guardedInternalBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_guardedInternalBuiltinIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().guardedInternalBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().guardedInternalBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+
+### End File: GuardedInternalBuiltinBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result b/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result
new file mode 100644
index 0000000..f1dcace
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-UnguardedBuiltin-Separate.js-result
@@ -0,0 +1,188 @@
+### Begin File: UnguardedBuiltinBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* UnguardedBuiltin */
+extern const char* s_unguardedBuiltinIsReadableStreamLockedCode;
+extern const int s_unguardedBuiltinIsReadableStreamLockedCodeLength;
+extern const JSC::ConstructAbility s_unguardedBuiltinIsReadableStreamLockedCodeConstructAbility;
+
+#define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_DATA(macro) \
+    macro(isReadableStreamLocked, unguardedBuiltinIsReadableStreamLocked, 1) \
+
+#define WEBCORE_BUILTIN_UNGUARDEDBUILTIN_ISREADABLESTREAMLOCKED 1
+
+#define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(macro) \
+    macro(unguardedBuiltinIsReadableStreamLockedCode, isReadableStreamLocked, s_unguardedBuiltinIsReadableStreamLockedCodeLength) \
+
+#define WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(macro) \
+    macro(isReadableStreamLocked) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class UnguardedBuiltinBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit UnguardedBuiltinBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length)))
+        WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* UnguardedBuiltinBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void UnguardedBuiltinBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+} // namespace WebCore
+### End File: UnguardedBuiltinBuiltins.h
+
+### Begin File: UnguardedBuiltinBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "UnguardedBuiltinBuiltins.h"
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_unguardedBuiltinIsReadableStreamLockedCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_unguardedBuiltinIsReadableStreamLockedCodeLength = 70;
+static const JSC::Intrinsic s_unguardedBuiltinIsReadableStreamLockedCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_unguardedBuiltinIsReadableStreamLockedCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().unguardedBuiltinBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().unguardedBuiltinBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_UNGUARDEDBUILTIN_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+### End File: UnguardedBuiltinBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result b/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result
new file mode 100644
index 0000000..4af18ac
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCore-xmlCasingTest-Separate.js-result
@@ -0,0 +1,280 @@
+### Begin File: xmlCasingTestBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#if ENABLE(STREAMS_API)
+
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+class FunctionExecutable;
+}
+
+namespace WebCore {
+
+/* xmlCasingTest */
+extern const char* s_xmlCasingTestXMLCasingTestCode;
+extern const int s_xmlCasingTestXMLCasingTestCodeLength;
+extern const JSC::ConstructAbility s_xmlCasingTestXMLCasingTestCodeConstructAbility;
+extern const char* s_xmlCasingTestCssCasingTestCode;
+extern const int s_xmlCasingTestCssCasingTestCodeLength;
+extern const JSC::ConstructAbility s_xmlCasingTestCssCasingTestCodeConstructAbility;
+extern const char* s_xmlCasingTestUrlCasingTestCode;
+extern const int s_xmlCasingTestUrlCasingTestCodeLength;
+extern const JSC::ConstructAbility s_xmlCasingTestUrlCasingTestCodeConstructAbility;
+
+#define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_DATA(macro) \
+    macro(xmlCasingTest, xmlCasingTestXMLCasingTest, 1) \
+    macro(cssCasingTest, xmlCasingTestCssCasingTest, 2) \
+    macro(urlCasingTest, xmlCasingTestUrlCasingTest, 3) \
+
+#define WEBCORE_BUILTIN_XMLCASINGTEST_XMLCASINGTEST 1
+#define WEBCORE_BUILTIN_XMLCASINGTEST_CSSCASINGTEST 1
+#define WEBCORE_BUILTIN_XMLCASINGTEST_URLCASINGTEST 1
+
+#define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(macro) \
+    macro(xmlCasingTestXMLCasingTestCode, xmlCasingTest, s_xmlCasingTestXMLCasingTestCodeLength) \
+    macro(xmlCasingTestCssCasingTestCode, cssCasingTest, s_xmlCasingTestCssCasingTestCodeLength) \
+    macro(xmlCasingTestUrlCasingTestCode, urlCasingTest, s_xmlCasingTestUrlCasingTestCodeLength) \
+
+#define WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(macro) \
+    macro(cssCasingTest) \
+    macro(urlCasingTest) \
+    macro(xmlCasingTest) \
+
+#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+    JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
+
+WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
+#undef DECLARE_BUILTIN_GENERATOR
+
+class xmlCasingTestBuiltinsWrapper : private JSC::WeakHandleOwner {
+public:
+    explicit xmlCasingTestBuiltinsWrapper(JSC::VM* vm)
+        : m_vm(*vm)
+        WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length)))
+        WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+    {
+    }
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+    JSC::UnlinkedFunctionExecutable* name##Executable(); \
+    const JSC::SourceCode& name##Source() const { return m_##name##Source; }
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+
+    void exportNames();
+
+private:
+    JSC::VM& m_vm;
+
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \
+    JSC::SourceCode m_##name##Source;\
+    JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+
+};
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+inline JSC::UnlinkedFunctionExecutable* xmlCasingTestBuiltinsWrapper::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+inline void xmlCasingTestBuiltinsWrapper::exportNames()
+{
+#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
+#undef EXPORT_FUNCTION_NAME
+}
+
+class xmlCasingTestBuiltinFunctions {
+public:
+    explicit xmlCasingTestBuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
+
+    void init(JSC::JSGlobalObject&);
+    void visit(JSC::SlotVisitor&);
+
+public:
+    JSC::VM& m_vm;
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \
+    JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+inline void xmlCasingTestBuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
+{
+#define EXPORT_FUNCTION(codeName, functionName, length)\
+    m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(EXPORT_FUNCTION)
+#undef EXPORT_FUNCTION
+}
+
+inline void xmlCasingTestBuiltinFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
+#undef VISIT_FUNCTION
+}
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+### End File: xmlCasingTestBuiltins.h
+
+### Begin File: xmlCasingTestBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "xmlCasingTestBuiltins.h"
+
+#if ENABLE(STREAMS_API)
+
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+const JSC::ConstructAbility s_xmlCasingTestXMLCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_xmlCasingTestXMLCasingTestCodeLength = 70;
+static const JSC::Intrinsic s_xmlCasingTestXMLCasingTestCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_xmlCasingTestXMLCasingTestCode =
+    "(function (stream)\n" \
+    "{\n" \
+    "   \"use strict\";\n" \
+    "    return !!stream.@reader;\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_xmlCasingTestCssCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_xmlCasingTestCssCasingTestCodeLength = 401;
+static const JSC::Intrinsic s_xmlCasingTestCssCasingTestCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_xmlCasingTestCssCasingTestCode =
+    "(function (stream, reason)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    if (stream.@state === @readableStreamClosed)\n" \
+    "        return Promise.resolve();\n" \
+    "    if (stream.@state === @readableStreamErrored)\n" \
+    "        return Promise.reject(stream.@storedError);\n" \
+    "    stream.@queue = [];\n" \
+    "    @finishClosingReadableStream(stream);\n" \
+    "    return @promiseInvokeOrNoop(stream.@underlyingSource, \"cancel\", [reason]).then(function() { });\n" \
+    "})\n" \
+;
+
+const JSC::ConstructAbility s_xmlCasingTestUrlCasingTestCodeConstructAbility = JSC::ConstructAbility::CannotConstruct;
+const int s_xmlCasingTestUrlCasingTestCodeLength = 337;
+static const JSC::Intrinsic s_xmlCasingTestUrlCasingTestCodeIntrinsic = JSC::NoIntrinsic;
+const char* s_xmlCasingTestUrlCasingTestCode =
+    "(function (object, key, args)\n" \
+    "{\n" \
+    "    \"use strict\";\n" \
+    "    try {\n" \
+    "        var method = object[key];\n" \
+    "        if (typeof method === \"undefined\")\n" \
+    "            return Promise.resolve();\n" \
+    "        var result = method.@apply(object, args);\n" \
+    "        return Promise.resolve(result);\n" \
+    "    }\n" \
+    "    catch(error) {\n" \
+    "        return Promise.reject(error);\n" \
+    "    }\n" \
+    "})\n" \
+;
+
+
+#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \
+JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \
+{\
+    JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \
+    return clientData->builtinFunctions().xmlCasingTestBuiltins().codeName##Executable()->link(vm, clientData->builtinFunctions().xmlCasingTestBuiltins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
+}
+WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
+#undef DEFINE_BUILTIN_GENERATOR
+
+
+} // namespace WebCore
+
+#endif // ENABLE(STREAMS_API)
+
+### End File: xmlCasingTestBuiltins.cpp
diff --git a/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result b/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result
new file mode 100644
index 0000000..dab4244
--- /dev/null
+++ b/Scripts/tests/builtins/expected/WebCoreJSBuiltins.h-result
@@ -0,0 +1,351 @@
+### Begin File: WebCoreJSBuiltins.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#include "AnotherGuardedInternalBuiltinBuiltins.h"
+#include "ArbitraryConditionalGuardBuiltins.h"
+#include "GuardedBuiltinBuiltins.h"
+#include "GuardedInternalBuiltinBuiltins.h"
+#include "UnguardedBuiltinBuiltins.h"
+#include "xmlCasingTestBuiltins.h"
+#include 
+
+namespace WebCore {
+
+class JSBuiltinFunctions {
+public:
+    explicit JSBuiltinFunctions(JSC::VM& vm)
+        : m_vm(vm)
+#if ENABLE(FETCH_API)
+        , m_anotherGuardedInternalBuiltinBuiltins(&m_vm)
+#endif // ENABLE(FETCH_API)
+#if ENABLE(STREAMS_API) || USE(CF)
+        , m_arbitraryConditionalGuardBuiltins(&m_vm)
+#endif // ENABLE(STREAMS_API) || USE(CF)
+#if ENABLE(STREAMS_API)
+        , m_guardedBuiltinBuiltins(&m_vm)
+#endif // ENABLE(STREAMS_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+        , m_guardedInternalBuiltinBuiltins(&m_vm)
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+        , m_unguardedBuiltinBuiltins(&m_vm)
+#if ENABLE(STREAMS_API)
+        , m_xmlCasingTestBuiltins(&m_vm)
+#endif // ENABLE(STREAMS_API)
+    {
+#if ENABLE(FETCH_API)
+        m_anotherGuardedInternalBuiltinBuiltins.exportNames();
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+        m_guardedInternalBuiltinBuiltins.exportNames();
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+        m_xmlCasingTestBuiltins.exportNames();
+#endif // ENABLE(STREAMS_API)
+    }
+
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinsWrapper& anotherGuardedInternalBuiltinBuiltins() { return m_anotherGuardedInternalBuiltinBuiltins; }
+#endif // ENABLE(FETCH_API)
+#if ENABLE(STREAMS_API) || USE(CF)
+    ArbitraryConditionalGuardBuiltinsWrapper& arbitraryConditionalGuardBuiltins() { return m_arbitraryConditionalGuardBuiltins; }
+#endif // ENABLE(STREAMS_API) || USE(CF)
+#if ENABLE(STREAMS_API)
+    GuardedBuiltinBuiltinsWrapper& guardedBuiltinBuiltins() { return m_guardedBuiltinBuiltins; }
+#endif // ENABLE(STREAMS_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinsWrapper& guardedInternalBuiltinBuiltins() { return m_guardedInternalBuiltinBuiltins; }
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    UnguardedBuiltinBuiltinsWrapper& unguardedBuiltinBuiltins() { return m_unguardedBuiltinBuiltins; }
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinsWrapper& xmlCasingTestBuiltins() { return m_xmlCasingTestBuiltins; }
+#endif // ENABLE(STREAMS_API)
+
+private:
+    JSC::VM& m_vm;
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinsWrapper m_anotherGuardedInternalBuiltinBuiltins;
+#endif // ENABLE(FETCH_API)
+#if ENABLE(STREAMS_API) || USE(CF)
+    ArbitraryConditionalGuardBuiltinsWrapper m_arbitraryConditionalGuardBuiltins;
+#endif // ENABLE(STREAMS_API) || USE(CF)
+#if ENABLE(STREAMS_API)
+    GuardedBuiltinBuiltinsWrapper m_guardedBuiltinBuiltins;
+#endif // ENABLE(STREAMS_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinsWrapper m_guardedInternalBuiltinBuiltins;
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    UnguardedBuiltinBuiltinsWrapper m_unguardedBuiltinBuiltins;
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinsWrapper m_xmlCasingTestBuiltins;
+#endif // ENABLE(STREAMS_API)
+};
+
+} // namespace WebCore
+### End File: WebCoreJSBuiltins.h
+
+### Begin File: WebCoreJSBuiltins.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "AnotherGuardedInternalBuiltinBuiltins.cpp"
+#include "ArbitraryConditionalGuardBuiltins.cpp"
+#include "GuardedBuiltinBuiltins.cpp"
+#include "GuardedInternalBuiltinBuiltins.cpp"
+#include "UnguardedBuiltinBuiltins.cpp"
+#include "xmlCasingTestBuiltins.cpp"
+### End File: WebCoreJSBuiltins.cpp
+
+### Begin File: WebCoreJSBuiltinInternals.h
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#pragma once
+
+#include "AnotherGuardedInternalBuiltinBuiltins.h"
+#include "GuardedInternalBuiltinBuiltins.h"
+#include "xmlCasingTestBuiltins.h"
+#include 
+#include 
+
+namespace WebCore {
+
+class JSDOMGlobalObject;
+
+class JSBuiltinInternalFunctions {
+public:
+    explicit JSBuiltinInternalFunctions(JSC::VM&);
+
+    void visit(JSC::SlotVisitor&);
+    void initialize(JSDOMGlobalObject&);
+
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinFunctions& anotherGuardedInternalBuiltin() { return m_anotherGuardedInternalBuiltin; }
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinFunctions& guardedInternalBuiltin() { return m_guardedInternalBuiltin; }
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinFunctions& xmlCasingTest() { return m_xmlCasingTest; }
+#endif // ENABLE(STREAMS_API)
+
+private:
+#if ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+    JSC::VM& m_vm;
+#endif // ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+#if ENABLE(FETCH_API)
+    AnotherGuardedInternalBuiltinBuiltinFunctions m_anotherGuardedInternalBuiltin;
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    GuardedInternalBuiltinBuiltinFunctions m_guardedInternalBuiltin;
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    XMLCasingTestBuiltinFunctions m_xmlCasingTest;
+#endif // ENABLE(STREAMS_API)
+};
+
+} // namespace WebCore
+### End File: WebCoreJSBuiltinInternals.h
+
+### Begin File: WebCoreJSBuiltinInternals.cpp
+/*
+ * Copyright (c) 2015 Canon Inc. All rights reserved.
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ */
+
+// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
+// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py
+
+#include "config.h"
+#include "WebCoreJSBuiltinInternals.h"
+
+#include "JSDOMGlobalObject.h"
+#include "WebCoreJSClientData.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace WebCore {
+
+JSBuiltinInternalFunctions::JSBuiltinInternalFunctions(JSC::VM& vm)
+#if ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+    : m_vm(vm)
+#endif // ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+#if ENABLE(FETCH_API)
+    , m_anotherGuardedInternalBuiltin(m_vm)
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    , m_guardedInternalBuiltin(m_vm)
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    , m_xmlCasingTest(m_vm)
+#endif // ENABLE(STREAMS_API)
+{
+    UNUSED_PARAM(vm);
+}
+
+void JSBuiltinInternalFunctions::visit(JSC::SlotVisitor& visitor)
+{
+#if ENABLE(FETCH_API)
+    m_anotherGuardedInternalBuiltin.visit(visitor);
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    m_guardedInternalBuiltin.visit(visitor);
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    m_xmlCasingTest.visit(visitor);
+#endif // ENABLE(STREAMS_API)
+    UNUSED_PARAM(visitor);
+}
+
+void JSBuiltinInternalFunctions::initialize(JSDOMGlobalObject& globalObject)
+{
+    UNUSED_PARAM(globalObject);
+#if ENABLE(FETCH_API)
+    m_anotherGuardedInternalBuiltin.init(globalObject);
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+    m_guardedInternalBuiltin.init(globalObject);
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+    m_xmlCasingTest.init(globalObject);
+#endif // ENABLE(STREAMS_API)
+
+#if ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+    JSVMClientData& clientData = *static_cast<JSVMClientData*>(m_vm.clientData);
+    JSDOMGlobalObject::GlobalPropertyInfo staticGlobals[] = {
+#if ENABLE(FETCH_API)
+#define DECLARE_GLOBAL_STATIC(name) \
+    JSDOMGlobalObject::GlobalPropertyInfo( \
+        clientData.builtinFunctions().anotherGuardedInternalBuiltinBuiltins().name##PrivateName(), anotherGuardedInternalBuiltin().m_##name##Function.get() , JSC::DontDelete | JSC::ReadOnly),
+    WEBCORE_FOREACH_ANOTHERGUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)
+#undef DECLARE_GLOBAL_STATIC
+#endif // ENABLE(FETCH_API)
+#if ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#define DECLARE_GLOBAL_STATIC(name) \
+    JSDOMGlobalObject::GlobalPropertyInfo( \
+        clientData.builtinFunctions().guardedInternalBuiltinBuiltins().name##PrivateName(), guardedInternalBuiltin().m_##name##Function.get() , JSC::DontDelete | JSC::ReadOnly),
+    WEBCORE_FOREACH_GUARDEDINTERNALBUILTIN_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)
+#undef DECLARE_GLOBAL_STATIC
+#endif // ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API)
+#if ENABLE(STREAMS_API)
+#define DECLARE_GLOBAL_STATIC(name) \
+    JSDOMGlobalObject::GlobalPropertyInfo( \
+        clientData.builtinFunctions().xmlCasingTestBuiltins().name##PrivateName(), xmlCasingTest().m_##name##Function.get() , JSC::DontDelete | JSC::ReadOnly),
+    WEBCORE_FOREACH_XMLCASINGTEST_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)
+#undef DECLARE_GLOBAL_STATIC
+#endif // ENABLE(STREAMS_API)
+    };
+    globalObject.addStaticGlobals(staticGlobals, WTF_ARRAY_LENGTH(staticGlobals));
+    UNUSED_PARAM(clientData);
+#endif // ENABLE(FETCH_API) || ENABLE(READABLE_STREAM_API) || ENABLE(WRITABLE_STREAM_API) || ENABLE(STREAMS_API)
+}
+
+} // namespace WebCore
+### End File: WebCoreJSBuiltinInternals.cpp
diff --git a/Scripts/xxd.pl b/Scripts/xxd.pl
new file mode 100644
index 0000000..5ee08a5
--- /dev/null
+++ b/Scripts/xxd.pl
@@ -0,0 +1,45 @@
+#! /usr/bin/perl
+
+# Copyright (C) 2010-2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#    # Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#    # Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#    # Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
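+# Usage: perl xxd.pl <variable name> <input file> <output file>
+#
+# Reads the input file as raw bytes and writes a C definition of the form
+# "const unsigned char <variable name>[] = { 0x.., 0x.., ... };" to the
+# output file, one hex literal per input byte.
+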
+$varname = shift;
+$fname = shift;
+$output = shift;
+
+open($input, '<', $fname) or die "Can't open file for read: $fname $!";
+$/ = undef;
+$text = <$input>;
+close($input);
+
+$text = join(', ', map('0x' . unpack("H*", $_), split(undef, $text)));
+
+open($output, '>', $output) or die "Can't open file for write: $output $!";
+print $output "const unsigned char $varname\[\] = {\n$text\n};\n";
+close($output);
diff --git a/THANKS b/THANKS
new file mode 100644
index 0000000..b9a9649
--- /dev/null
+++ b/THANKS
@@ -0,0 +1,8 @@
+
+I would like to thank the following people for their help:
+
+Richard Moore  - for filling the Math object with some life
+Daegeun Lee  - for pointing out some bugs and providing
+                                  much code for the String and Date object.
+Marco Pinelli  - for his patches
+Christian Kirsch  - for his contribution to the Date object
diff --git a/assembler/ARM64Assembler.h b/assembler/ARM64Assembler.h
new file mode 100644
index 0000000..29d5bce
--- /dev/null
+++ b/assembler/ARM64Assembler.h
@@ -0,0 +1,3626 @@
+/*
+ * Copyright (C) 2012, 2014, 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(ARM64)
+
+#include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
+#include 
+#include 
+#include 
+#include 
+
+#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64)
+#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32)
+#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64)
+#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize)
+#define CHECK_VECTOR_DATASIZE() ASSERT(datasize == 64 || datasize == 128)
+#define DATASIZE DATASIZE_OF(datasize)
+#define MEMOPSIZE MEMOPSIZE_OF(datasize)
+#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
+#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
+#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)
+
+namespace JSC {
+
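+// isInt7 and isInt11 test whether a value fits in a signed immediate of the
+// given width by sign-extending its low bits (a left shift followed by an
+// arithmetic right shift) and comparing against the original value; isUInt5
+// simply masks off everything above the low five bits.
+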
+ALWAYS_INLINE bool isInt7(int32_t value)
+{
+    return value == ((value << 25) >> 25);
+}
+
+ALWAYS_INLINE bool isInt11(int32_t value)
+{
+    return value == ((value << 21) >> 21);
+}
+
+ALWAYS_INLINE bool isUInt5(int32_t value)
+{
+    return !(value & ~0x1f);
+}
+
+class UInt5 {
+public:
+    explicit UInt5(int value)
+        : m_value(value)
+    {
+        ASSERT(isUInt5(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class UInt12 {
+public:
+    explicit UInt12(int value)
+        : m_value(value)
+    {
+        ASSERT(isUInt12(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class PostIndex {
+public:
+    explicit PostIndex(int value)
+        : m_value(value)
+    {
+        ASSERT(isInt9(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class PreIndex {
+public:
+    explicit PreIndex(int value)
+        : m_value(value)
+    {
+        ASSERT(isInt9(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class PairPostIndex {
+public:
+    explicit PairPostIndex(int value)
+        : m_value(value)
+    {
+        ASSERT(isInt11(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+class PairPreIndex {
+public:
+    explicit PairPreIndex(int value)
+        : m_value(value)
+    {
+        ASSERT(isInt11(value));
+    }
+
+    operator int() { return m_value; }
+
+private:
+    int m_value;
+};
+
+typedef ARM64LogicalImmediate LogicalImmediate;
+
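+// Returns the 16-bit halfword of 'value' selected by 'which'
+// (0 = least significant, 3 = most significant).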
+inline uint16_t getHalfword(uint64_t value, int which)
+{
+    return value >> (which << 4);
+}
+
+namespace ARM64Registers {
+
+#define FOR_EACH_CPU_REGISTER(V) \
+    FOR_EACH_CPU_GPREGISTER(V) \
+    FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    FOR_EACH_CPU_FPREGISTER(V)
+
+// The following are defined as pairs of the following value:
+// 1. type of the storage needed to save the register value by the JIT probe.
+// 2. name of the register.
+#define FOR_EACH_CPU_GPREGISTER(V) \
+    /* Parameter/result registers */ \
+    V(void*, x0) \
+    V(void*, x1) \
+    V(void*, x2) \
+    V(void*, x3) \
+    V(void*, x4) \
+    V(void*, x5) \
+    V(void*, x6) \
+    V(void*, x7) \
+    /* Indirect result location register */ \
+    V(void*, x8) \
+    /* Temporary registers */ \
+    V(void*, x9) \
+    V(void*, x10) \
+    V(void*, x11) \
+    V(void*, x12) \
+    V(void*, x13) \
+    V(void*, x14) \
+    V(void*, x15) \
+    /* Intra-procedure-call scratch registers (temporary) */ \
+    V(void*, x16) \
+    V(void*, x17) \
+    /* Platform Register (temporary) */ \
+    V(void*, x18) \
+    /* Callee-saved */ \
+    V(void*, x19) \
+    V(void*, x20) \
+    V(void*, x21) \
+    V(void*, x22) \
+    V(void*, x23) \
+    V(void*, x24) \
+    V(void*, x25) \
+    V(void*, x26) \
+    V(void*, x27) \
+    V(void*, x28) \
+    /* Special */ \
+    V(void*, fp) \
+    V(void*, lr) \
+    V(void*, sp)
+
+#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    V(void*, pc) \
+    V(void*, nzcv) \
+    V(void*, fpsr) \
+
+// ARM64 always has 32 FPU registers 128-bits each. See http://llvm.org/devmtg/2012-11/Northover-AArch64.pdf
+// and Section 5.1.2 in http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf.
+// However, we only use them for 64-bit doubles.
+#define FOR_EACH_CPU_FPREGISTER(V) \
+    /* Parameter/result registers */ \
+    V(double, q0) \
+    V(double, q1) \
+    V(double, q2) \
+    V(double, q3) \
+    V(double, q4) \
+    V(double, q5) \
+    V(double, q6) \
+    V(double, q7) \
+    /* Callee-saved (up to 64-bits only!) */ \
+    V(double, q8) \
+    V(double, q9) \
+    V(double, q10) \
+    V(double, q11) \
+    V(double, q12) \
+    V(double, q13) \
+    V(double, q14) \
+    V(double, q15) \
+    /* Temporary registers */ \
+    V(double, q16) \
+    V(double, q17) \
+    V(double, q18) \
+    V(double, q19) \
+    V(double, q20) \
+    V(double, q21) \
+    V(double, q22) \
+    V(double, q23) \
+    V(double, q24) \
+    V(double, q25) \
+    V(double, q26) \
+    V(double, q27) \
+    V(double, q28) \
+    V(double, q29) \
+    V(double, q30) \
+    V(double, q31)
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+
+    ip0 = x16,
+    ip1 = x17,
+    x29 = fp,
+    x30 = lr,
+    zr = 0x3f,
+} RegisterID;
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+} FPRegisterID;
+
+static constexpr bool isSp(RegisterID reg) { return reg == sp; }
+static constexpr bool isZr(RegisterID reg) { return reg == zr; }
+
+} // namespace ARM64Registers
+
+class ARM64Assembler {
+public:
+    typedef ARM64Registers::RegisterID RegisterID;
+    typedef ARM64Registers::FPRegisterID FPRegisterID;
+    
+    static constexpr RegisterID firstRegister() { return ARM64Registers::x0; }
+    static constexpr RegisterID lastRegister() { return ARM64Registers::sp; }
+    
+    static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
+    static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
+
+private:
+    static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
+    static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); }
+
+public:
+    ARM64Assembler()
+        : m_indexOfLastWatchpoint(INT_MIN)
+        , m_indexOfTailOfLastWatchpoint(INT_MIN)
+    {
+    }
+    
+    AssemblerBuffer& buffer() { return m_buffer; }
+
+    // (HS, LO, HI, LS) -> (AE, B, A, BE)
+    // (VS, VC) -> (O, NO)
+    typedef enum {
+        ConditionEQ,
+        ConditionNE,
+        ConditionHS, ConditionCS = ConditionHS,
+        ConditionLO, ConditionCC = ConditionLO,
+        ConditionMI,
+        ConditionPL,
+        ConditionVS,
+        ConditionVC,
+        ConditionHI,
+        ConditionLS,
+        ConditionGE,
+        ConditionLT,
+        ConditionGT,
+        ConditionLE,
+        ConditionAL,
+        ConditionInvalid
+    } Condition;
+
+    static Condition invert(Condition cond)
+    {
+        return static_cast<Condition>(cond ^ 1);
+    }
+
+    typedef enum {
+        LSL,
+        LSR,
+        ASR,
+        ROR
+    } ShiftType;
+
+    typedef enum {
+        UXTB,
+        UXTH,
+        UXTW,
+        UXTX,
+        SXTB,
+        SXTH,
+        SXTW,
+        SXTX
+    } ExtendType;
+
+    enum SetFlags {
+        DontSetFlags,
+        S
+    };
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 4)
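+    // Each jump enum value packs two fields: the low 4 bits hold the enum
+    // index, and the remaining bits hold the size, in bytes, of the
+    // instruction sequence associated with that kind of jump, which
+    // JUMP_ENUM_SIZE recovers.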
+    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+        JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
+        JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)),
+        JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
+        JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
+        JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
+        JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
+        JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)),
+        JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)),
+    };
+    enum JumpLinkType {
+        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+        LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)),
+        LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)),
+        LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)),
+        LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)),
+        LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)),
+        LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)),
+        LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)),
+    };
+
+    class LinkRecord {
+    public:
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+        }
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+            data.realTypes.m_is64Bit = is64Bit;
+            data.realTypes.m_compareRegister = compareRegister;
+        }
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+            data.realTypes.m_bitNumber = bitNumber;
+            data.realTypes.m_compareRegister = compareRegister;
+        }
+        void operator=(const LinkRecord& other)
+        {
+            data.copyTypes.content[0] = other.data.copyTypes.content[0];
+            data.copyTypes.content[1] = other.data.copyTypes.content[1];
+            data.copyTypes.content[2] = other.data.copyTypes.content[2];
+        }
+        intptr_t from() const { return data.realTypes.m_from; }
+        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+        intptr_t to() const { return data.realTypes.m_to; }
+        JumpType type() const { return data.realTypes.m_type; }
+        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+        Condition condition() const { return data.realTypes.m_condition; }
+        bool is64Bit() const { return data.realTypes.m_is64Bit; }
+        unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
+        RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }
+
+    private:
+        union {
+            struct RealTypes {
+                intptr_t m_from : 48;
+                intptr_t m_to : 48;
+                JumpType m_type : 8;
+                JumpLinkType m_linkType : 8;
+                Condition m_condition : 4;
+                unsigned m_bitNumber : 6;
+                RegisterID m_compareRegister : 6;
+                bool m_is64Bit : 1;
+            } realTypes;
+            struct CopyTypes {
+                uint64_t content[3];
+            } copyTypes;
+            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+        } data;
+    };
+
+    // bits(N) VFPExpandImm(bits(8) imm8);
+    //
+    // Encoding of floating point immediates is a little complicated. Here's a
+    // high level description:
+    //     +/- m * 2^-n, where m and n are integers, 16 <= m <= 31, 0 <= n <= 7
+    // and the algorithm for expanding to a single precision float:
+    //     return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
+    //
+    // The trickiest bit is how the exponent is handled. The following table
+    // may help clarify things a little:
+    //     654
+    //     100 01111100 124 -3 1020 01111111100
+    //     101 01111101 125 -2 1021 01111111101
+    //     110 01111110 126 -1 1022 01111111110
+    //     111 01111111 127  0 1023 01111111111
+    //     000 10000000 128  1 1024 10000000000
+    //     001 10000001 129  2 1025 10000000001
+    //     010 10000010 130  3 1026 10000000010
+    //     011 10000011 131  4 1027 10000000011
+    // The first column shows the bit pattern stored in bits 6-4 of the arm
+    // encoded immediate. The second column shows the 8-bit IEEE 754 single
+    // -precision exponent in binary, the third column shows the raw decimal
+    // value. IEEE 754 single-precision numbers are stored with a bias of 127
+    // to the exponent, so the fourth column shows the resulting exponent.
+    // From this we can see that the exponent can be in the range -3..4,
+    // which agrees with the high level description given above. The fifth
+    // and sixth columns show the value stored in an IEEE 754 double-precision
+    // number to represent these exponents in decimal and binary, given the
+    // bias of 1023.
+    //
+    // Ultimately, detecting doubles that can be encoded as immediates on arm
+    // and encoding doubles is actually not too bad. A floating point value can
+    // be encoded by retaining the sign bit, the low three bits of the exponent
+    // and the high 4 bits of the mantissa. To validly be able to encode an
+    // immediate the remainder of the mantissa must be zero, and the high part
+    // of the exponent must match the top bit retained, bar the highest bit
+    // which must be its inverse.
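+    //
+    // For example, the double 1.0 (sign 0, biased exponent 0x3ff, zero
+    // mantissa) is encodable and yields imm8 = 0x70, while 2.0 yields
+    // imm8 = 0x00.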
+    static bool canEncodeFPImm(double d)
+    {
+        // Discard the sign bit, the low two bits of the exponent & the highest
+        // four bits of the mantissa.
+        uint64_t masked = bitwise_cast<uint64_t>(d) & 0x7fc0ffffffffffffull;
+        return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull);
+    }
+
+    template<int datasize>
+    static bool canEncodePImmOffset(int32_t offset)
+    {
+        return isValidScaledUImm12<datasize>(offset);
+    }
+
+    static bool canEncodeSImmOffset(int32_t offset)
+    {
+        return isValidSignedImm9(offset);
+    }
+
+private:
+    int encodeFPImm(double d)
+    {
+        ASSERT(canEncodeFPImm(d));
+        uint64_t u64 = bitwise_cast<uint64_t>(d);
+        return (static_cast<int>(u64 >> 56) & 0x80) | (static_cast<int>(u64 >> 48) & 0x7f);
+    }
+
+    template<int datasize>
+    int encodeShiftAmount(int amount)
+    {
+        ASSERT(!amount || datasize == (8 << amount));
+        return amount;
+    }
+
+    template
+    static int encodePositiveImmediate(unsigned pimm)
+    {
+        ASSERT(!(pimm & ((datasize / 8) - 1)));
+        return pimm / (datasize / 8);
+    }
+
+    enum Datasize {
+        Datasize_32,
+        Datasize_64,
+        Datasize_64_top,
+        Datasize_16
+    };
+
+    enum MemOpSize {
+        MemOpSize_8_or_128,
+        MemOpSize_16,
+        MemOpSize_32,
+        MemOpSize_64,
+    };
+
+    enum BranchType {
+        BranchType_JMP,
+        BranchType_CALL,
+        BranchType_RET
+    };
+
+    enum AddOp {
+        AddOp_ADD,
+        AddOp_SUB
+    };
+
+    enum BitfieldOp {
+        BitfieldOp_SBFM,
+        BitfieldOp_BFM,
+        BitfieldOp_UBFM
+    };
+
+    enum DataOp1Source {
+        DataOp_RBIT,
+        DataOp_REV16,
+        DataOp_REV32,
+        DataOp_REV64,
+        DataOp_CLZ,
+        DataOp_CLS
+    };
+
+    enum DataOp2Source {
+        DataOp_UDIV = 2,
+        DataOp_SDIV = 3,
+        DataOp_LSLV = 8,
+        DataOp_LSRV = 9,
+        DataOp_ASRV = 10,
+        DataOp_RORV = 11
+    };
+
+    enum DataOp3Source {
+        DataOp_MADD = 0,
+        DataOp_MSUB = 1,
+        DataOp_SMADDL = 2,
+        DataOp_SMSUBL = 3,
+        DataOp_SMULH = 4,
+        DataOp_UMADDL = 10,
+        DataOp_UMSUBL = 11,
+        DataOp_UMULH = 12
+    };
+
+    enum ExcepnOp {
+        ExcepnOp_EXCEPTION = 0,
+        ExcepnOp_BREAKPOINT = 1,
+        ExcepnOp_HALT = 2,
+        ExcepnOp_DCPS = 5
+    };
+
+    enum FPCmpOp {
+        FPCmpOp_FCMP = 0x00,
+        FPCmpOp_FCMP0 = 0x08,
+        FPCmpOp_FCMPE = 0x10,
+        FPCmpOp_FCMPE0 = 0x18
+    };
+
+    enum FPCondCmpOp {
+        FPCondCmpOp_FCMP,
+        FPCondCmpOp_FCMPE
+    };
+
+    enum FPDataOp1Source {
+        FPDataOp_FMOV = 0,
+        FPDataOp_FABS = 1,
+        FPDataOp_FNEG = 2,
+        FPDataOp_FSQRT = 3,
+        FPDataOp_FCVT_toSingle = 4,
+        FPDataOp_FCVT_toDouble = 5,
+        FPDataOp_FCVT_toHalf = 7,
+        FPDataOp_FRINTN = 8,
+        FPDataOp_FRINTP = 9,
+        FPDataOp_FRINTM = 10,
+        FPDataOp_FRINTZ = 11,
+        FPDataOp_FRINTA = 12,
+        FPDataOp_FRINTX = 14,
+        FPDataOp_FRINTI = 15
+    };
+
+    enum FPDataOp2Source {
+        FPDataOp_FMUL,
+        FPDataOp_FDIV,
+        FPDataOp_FADD,
+        FPDataOp_FSUB,
+        FPDataOp_FMAX,
+        FPDataOp_FMIN,
+        FPDataOp_FMAXNM,
+        FPDataOp_FMINNM,
+        FPDataOp_FNMUL
+    };
+
+    enum SIMD3Same {
+        SIMD_LogicalOp_AND = 0x03
+    };
+
+    enum FPIntConvOp {
+        FPIntConvOp_FCVTNS = 0x00,
+        FPIntConvOp_FCVTNU = 0x01,
+        FPIntConvOp_SCVTF = 0x02,
+        FPIntConvOp_UCVTF = 0x03,
+        FPIntConvOp_FCVTAS = 0x04,
+        FPIntConvOp_FCVTAU = 0x05,
+        FPIntConvOp_FMOV_QtoX = 0x06,
+        FPIntConvOp_FMOV_XtoQ = 0x07,
+        FPIntConvOp_FCVTPS = 0x08,
+        FPIntConvOp_FCVTPU = 0x09,
+        FPIntConvOp_FMOV_QtoX_top = 0x0e,
+        FPIntConvOp_FMOV_XtoQ_top = 0x0f,
+        FPIntConvOp_FCVTMS = 0x10,
+        FPIntConvOp_FCVTMU = 0x11,
+        FPIntConvOp_FCVTZS = 0x18,
+        FPIntConvOp_FCVTZU = 0x19,
+    };
+
+    enum LogicalOp {
+        LogicalOp_AND,
+        LogicalOp_ORR,
+        LogicalOp_EOR,
+        LogicalOp_ANDS
+    };
+
+    enum MemOp {
+        MemOp_STORE,
+        MemOp_LOAD,
+        MemOp_STORE_V128, 
+        MemOp_LOAD_V128,
+        MemOp_PREFETCH = 2, // size must be 3
+        MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2
+        MemOp_LOAD_signed32 = 3 // size may be 0 or 1
+    };
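+
+    // Note: in the A64 load/store encoding these opc values deliberately overlap.
+    // With the V (SIMD & FP) bit clear, opc = 2 means PRFM when the size field is 3,
+    // and a sign-extending load into a 64-bit register when the size field is 0, 1
+    // or 2; opc = 3 is the sign-extending load into a 32-bit register (sizes 0 and 1
+    // only). With the V bit set, opc = 2 and 3 (with size = 0) select the 128-bit
+    // vector store and load forms.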
+
+    enum MemPairOpSize {
+        MemPairOp_32 = 0,
+        MemPairOp_LoadSigned_32 = 1,
+        MemPairOp_64 = 2,
+
+        MemPairOp_V32 = MemPairOp_32,
+        MemPairOp_V64 = 1,
+        MemPairOp_V128 = 2
+    };
+
+    enum MoveWideOp {
+        MoveWideOp_N = 0,
+        MoveWideOp_Z = 2,
+        MoveWideOp_K = 3 
+    };
+
+    enum LdrLiteralOp {
+        LdrLiteralOp_32BIT = 0,
+        LdrLiteralOp_64BIT = 1,
+        LdrLiteralOp_LDRSW = 2,
+        LdrLiteralOp_128BIT = 2
+    };
+
+    static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
+    {
+        // return the log2 of the size in bytes, e.g. 64 bit size returns 3
+        if (V)
+            return size + 2;
+        return (size >> 1) + 2;
+    }
+
+public:
+    // Integer Instructions:
+
+    template
+    ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!shift || shift == 12);
+        insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        add(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        if (isSp(rd) || isSp(rn)) {
+            ASSERT(shift == LSL);
+            ASSERT(!isSp(rm));
+            add(rd, rn, rm, UXTX, amount);
+        } else
+            insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
+    }
+
+    ALWAYS_INLINE void adr(RegisterID rd, int offset)
+    {
+        insn(pcRelative(false, offset, rd));
+    }
+
+    ALWAYS_INLINE void adrp(RegisterID rd, int offset)
+    {
+        ASSERT(!(offset & 0xfff));
+        insn(pcRelative(true, offset >> 12, rd));
+        nopCortexA53Fix843419();
+    }
+
+    template
+    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        and_(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+    {
+        CHECK_DATASIZE();
+        insn(logicalImmediate(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift)
+    {
+        ASSERT(shift < datasize);
+        sbfm(rd, rn, shift, datasize - 1);
+    }
+
+    template
+    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        asrv(rd, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd));
+    }
+
+    ALWAYS_INLINE void b(int32_t offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        ASSERT(offset == (offset << 6) >> 6);
+        insn(unconditionalBranchImmediate(false, offset));
+    }
+
+    ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        ASSERT(offset == (offset << 13) >> 13);
+        insn(conditionalBranchImmediate(offset, cond));
+    }
+
+    template
+    ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        bfm(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+    }
+
+    template
+    ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms)
+    {
+        CHECK_DATASIZE();
+        insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        bfm(rd, rn, lsb, lsb + width - 1);
+    }
+
+    template
+    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        bic(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd));
+    }
+
+    ALWAYS_INLINE void bl(int32_t offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(unconditionalBranchImmediate(true, offset));
+    }
+
+    ALWAYS_INLINE void blr(RegisterID rn)
+    {
+        insn(unconditionalBranchRegister(BranchType_CALL, rn));
+    }
+
+    ALWAYS_INLINE void br(RegisterID rn)
+    {
+        insn(unconditionalBranchRegister(BranchType_JMP, rn));
+    }
+
+    ALWAYS_INLINE void brk(uint16_t imm)
+    {
+        insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0));
+    }
+    
+    template
+    ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(compareAndBranchImmediate(DATASIZE, true, offset, rt));
+    }
+
+    template
+    ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(compareAndBranchImmediate(DATASIZE, false, offset, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv));
+    }
+
+    template
+    ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv));
+    }
+
+    template
+    ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv));
+    }
+
+    template
+    ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv));
+    }
+
+    template
+    ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond)
+    {
+        csinc(rd, rn, rn, invert(cond));
+    }
+
+    template
+    ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond)
+    {
+        csinv(rd, rn, rn, invert(cond));
+    }
+
+    template
+    ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        add(ARM64Registers::zr, rn, imm12, shift);
+    }
+
+    template
+    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm)
+    {
+        add(ARM64Registers::zr, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        add(ARM64Registers::zr, rn, rm, extend, amount);
+    }
+
+    template
+    ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        add(ARM64Registers::zr, rn, rm, shift, amount);
+    }
+
+    template
+    ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        sub(ARM64Registers::zr, rn, imm12, shift);
+    }
+
+    template
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+    {
+        sub(ARM64Registers::zr, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        sub(ARM64Registers::zr, rn, rm, extend, amount);
+    }
+
+    template
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        sub(ARM64Registers::zr, rn, rm, shift, amount);
+    }
+
+    template
+    ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond)
+    {
+        csneg(rd, rn, rn, invert(cond));
+    }
+
+    template
+    ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void cset(RegisterID rd, Condition cond)
+    {
+        csinc(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
+    }
+
+    template
+    ALWAYS_INLINE void csetm(RegisterID rd, Condition cond)
+    {
+        csinv(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond));
+    }
+
+    template
+    ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        eon(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        eor(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd));
+    }
+    
+    template
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+    {
+        CHECK_DATASIZE();
+        insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb)
+    {
+        CHECK_DATASIZE();
+        insn(extract(DATASIZE, rm, lsb, rn, rd));
+    }
+
+    ALWAYS_INLINE void hint(int imm)
+    {
+        insn(hintPseudo(imm));
+    }
+
+    ALWAYS_INLINE void hlt(uint16_t imm)
+    {
+        insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
+    }
+
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        insn(0x0);
+    }
+
+    template
+    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void ldnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldr(rt, rn, rm, UXTX, 0);
+    }
+
+    template
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount(amount), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate(pimm), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(offset & 3));
+        insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        // Not calling the 5 argument form of ldrb, since when amount is omitted S is false.
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT_UNUSED(amount, !amount);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldrh(rt, rn, rm, UXTX, 0);
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT(!amount || amount == 1);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        // Not calling the 5 argument form of ldrsb, since when amount is omitted S is false.
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        ASSERT_UNUSED(amount, !amount);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldrsh(rt, rn, rm, UXTX, 0);
+    }
+
+    template
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!amount || amount == 1);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldrsw(rt, rn, rm, UXTX, 0);
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT(!amount || amount == 2);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift)
+    {
+        ASSERT(shift < datasize);
+        ubfm(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift);
+    }
+
+    template
+    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        lslv(rd, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift)
+    {
+        ASSERT(shift < datasize);
+        ubfm(rd, rn, shift, datasize - 1);
+    }
+
+    template
+    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        lsrv(rd, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        CHECK_DATASIZE();
+        nopCortexA53Fix835769();
+        insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        msub(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template
+    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+    {
+        if (isSp(rd) || isSp(rm))
+            add(rd, rm, UInt12(0));
+        else
+            orr(rd, ARM64Registers::zr, rm);
+    }
+
+    template
+    ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm)
+    {
+        orr(rd, ARM64Registers::zr, imm);
+    }
+
+    template
+    ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(shift & 0xf));
+        insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd));
+    }
+
+    template
+    ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(shift & 0xf));
+        insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd));
+    }
+
+    template
+    ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!(shift & 0xf));
+        insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd));
+    }
+
+    template
+    ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        CHECK_DATASIZE();
+        nopCortexA53Fix835769();
+        insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        madd(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template
+    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+    {
+        orn(rd, ARM64Registers::zr, rm);
+    }
+
+    template
+    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+    {
+        orn(rd, ARM64Registers::zr, rm, shift, amount);
+    }
+
+    template
+    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+    {
+        sub(rd, ARM64Registers::zr, rm);
+    }
+
+    template
+    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+    {
+        sub(rd, ARM64Registers::zr, rm, shift, amount);
+    }
+
+    template
+    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm)
+    {
+        sbc(rd, ARM64Registers::zr, rm);
+    }
+
+    template
+    ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount)
+    {
+        sbc(rd, ARM64Registers::zr, rm, shift, amount);
+    }
+
+    ALWAYS_INLINE void nop()
+    {
+        insn(nopPseudo());
+    }
+    
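+    // Note: fillNops() writes `size` bytes of nop instructions starting at `base`;
+    // `size` must be a multiple of the 4-byte instruction width. When the destination
+    // is executable JIT memory the write goes through performJITMemcpy(), otherwise a
+    // plain memcpy() is used.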
+    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+    {
+        RELEASE_ASSERT(!(size % sizeof(int32_t)));
+        size_t n = size / sizeof(int32_t);
+        for (int32_t* ptr = static_cast<int32_t*>(base); n--;) {
+            int insn = nopPseudo();
+            if (isCopyingToExecutableMemory)
+                performJITMemcpy(ptr++, &insn, sizeof(int));
+            else
+                memcpy(ptr++, &insn, sizeof(int));
+        }
+    }
+    
+    ALWAYS_INLINE void dmbISH()
+    {
+        insn(0xd5033bbf);
+    }
+
+    ALWAYS_INLINE void dmbISHST()
+    {
+        insn(0xd5033abf);
+    }
+
+    template
+    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        orn(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        orr(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm)
+    {
+        CHECK_DATASIZE();
+        insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd));
+    }
+
+    ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr)
+    {
+        insn(unconditionalBranchRegister(BranchType_RET, rn));
+    }
+
+    template
+    ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width.
+            insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd));
+        else
+            insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn)
+    {
+        ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands.
+        insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        rorv(rd, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift)
+    {
+        extr(rd, rs, rs, shift);
+    }
+
+    template
+    ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        sbfm(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+    }
+
+    template
+    ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms)
+    {
+        CHECK_DATASIZE();
+        insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        sbfm(rd, rn, lsb, lsb + width - 1);
+    }
+
+    template
+    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd));
+    }
+
+    ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        nopCortexA53Fix835769<64>();
+        insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        smsubl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        nopCortexA53Fix835769<64>();
+        insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd));
+    }
+
+    ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        smaddl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template
+    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void stnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2));
+    }
+
+    template
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        str(rt, rn, rm, UXTX, 0);
+    }
+
+    template
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount(amount), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate(pimm), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        // Not calling the 5 argument form of strb, since when amount is omitted S is false.
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT_UNUSED(amount, !amount);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        strh(rt, rn, rm, UXTX, 0);
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        ASSERT(!amount || amount == 1);
+        insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm)
+    {
+        insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!shift || shift == 12);
+        insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd, it uses XZR for the register 31. SUBS with extended register supports SP for Xd, but only if SetFlags is not used, otherwise register 31 is XZR.");
+        ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");
+
+        if (isSp(rd) || isSp(rn))
+            sub(rd, rn, rm, UXTX, 0);
+        else
+            sub(rd, rn, rm, LSL, 0);
+    }
+
+    template
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_DATASIZE();
+        insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        CHECK_DATASIZE();
+        ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
+        insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn)
+    {
+        sbfm(rd, rn, 0, 7);
+    }
+
+    template
+    ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn)
+    {
+        sbfm(rd, rn, 0, 15);
+    }
+
+    ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn)
+    {
+        sbfm<64>(rd, rn, 0, 31);
+    }
+
+    ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(testAndBranchImmediate(false, imm, offset, rt));
+    }
+
+    ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0)
+    {
+        ASSERT(!(offset & 3));
+        offset >>= 2;
+        insn(testAndBranchImmediate(true, imm, offset, rt));
+    }
+
+    template
+    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+    {
+        and_(ARM64Registers::zr, rn, rm);
+    }
+
+    template
+    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount)
+    {
+        and_(ARM64Registers::zr, rn, rm, shift, amount);
+    }
+
+    template
+    ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm)
+    {
+        and_(ARM64Registers::zr, rn, imm);
+    }
+
+    template
+    ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        ubfm(rd, rn, (datasize - lsb) & (datasize - 1), width - 1);
+    }
+
+    template
+    ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms)
+    {
+        CHECK_DATASIZE();
+        insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width)
+    {
+        ubfm(rd, rn, lsb, lsb + width - 1);
+    }
+
+    template
+    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        CHECK_DATASIZE();
+        insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd));
+    }
+
+    ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        nopCortexA53Fix835769<64>();
+        insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        umsubl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
+    {
+        nopCortexA53Fix835769<64>();
+        insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
+    }
+
+    ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd));
+    }
+
+    ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        umaddl(rd, rn, rm, ARM64Registers::zr);
+    }
+
+    template
+    ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn)
+    {
+        ubfm(rd, rn, 0, 7);
+    }
+
+    template
+    ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn)
+    {
+        ubfm(rd, rn, 0, 15);
+    }
+
+    ALWAYS_INLINE void uxtw(RegisterID rd, RegisterID rn)
+    {
+        ubfm<64>(rd, rn, 0, 31);
+    }
+
+    // Floating Point Instructions:
+
+    template
+    ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv));
+    }
+
+    template
+    ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv));
+    }
+
+    template
+    ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP));
+    }
+
+    template
+    ALWAYS_INLINE void fcmp_0(FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, static_cast(0), vn, FPCmpOp_FCMP0));
+    }
+
+    template
+    ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE));
+    }
+
+    template
+    ALWAYS_INLINE void fcmpe_0(FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointCompare(DATASIZE, static_cast(0), vn, FPCmpOp_FCMPE0));
+    }
+
+    template
+    ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn)
+    {
+        ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64);
+        ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64);
+        ASSERT(dstsize != srcsize);
+        Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16;
+        FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf;
+        insn(floatingPointDataProcessing1Source(type, opcode, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fmov(FPRegisterID vd, double imm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd));
+    }
+
+    ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn)
+    {
+        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd));
+    }
+
+    ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn)
+    {
+        insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd));
+    }
+
+    template
+    ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void vand(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_VECTOR_DATASIZE();
+        insn(vectorDataProcessing2Source(SIMD_LogicalOp_AND, vm, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm)
+    {
+        CHECK_DATASIZE();
+        insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        ldr(rt, rn, rm, UXTX, 0);
+    }
+
+    template
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount(amount), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate(pimm), rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+    }
+
+    template
+    ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        ASSERT(datasize >= 32);
+        ASSERT(!(offset & 3));
+        insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm)
+    {
+        str<datasize>(rt, rn, rm, UXTX, 0);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount<datasize>(amount), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate<datasize>(pimm), rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm)
+    {
+        CHECK_FP_MEMOP_DATASIZE();
+        insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm)
+    {
+        CHECK_DATASIZE();
+        insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt));
+    }
+
+    template<int dstsize, int srcsize>
+    ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn)
+    {
+        CHECK_DATASIZE_OF(dstsize);
+        CHECK_DATASIZE_OF(srcsize);
+        insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd));
+    }
+
+    // Admin methods:
+
+    AssemblerLabel labelIgnoringWatchpoints()
+    {
+        return m_buffer.label();
+    }
+
+    AssemblerLabel labelForWatchpoint()
+    {
+        AssemblerLabel result = m_buffer.label();
+        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+            result = label();
+        m_indexOfLastWatchpoint = result.m_offset;
+        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+        return result;
+    }
+
+    AssemblerLabel label()
+    {
+        AssemblerLabel result = m_buffer.label();
+        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+            nop();
+            result = m_buffer.label();
+        }
+        return result;
+    }
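+
+    // Note: label() pads with nops until it is past the recorded tail of the last
+    // watchpoint, so a later jump replacement over the watchpoint (at most
+    // maxJumpReplacementSize() bytes) can never overwrite an instruction that a
+    // label points at.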
+
+    AssemblerLabel align(int alignment)
+    {
+        ASSERT(!(alignment & 3));
+        while (!m_buffer.isAligned(alignment))
+            brk(0);
+        return label();
+    }
+    
+    static void* getRelocatedAddress(void* code, AssemblerLabel label)
+    {
+        ASSERT(label.isSet());
+        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+    }
+    
+    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+    {
+        return b.m_offset - a.m_offset;
+    }
+
+    void* unlinkedCode() { return m_buffer.data(); }
+    size_t codeSize() const { return m_buffer.codeSize(); }
+
+    static unsigned getCallReturnOffset(AssemblerLabel call)
+    {
+        ASSERT(call.isSet());
+        return call.m_offset;
+    }
+
+    // Linking & patching:
+    //
+    // 'link' and 'patch' methods are for use on unprotected code - such as the code
+    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+    // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+    // pool the 'repatch' and 'relink' methods should be used.
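+    //
+    // An illustrative sketch (hypothetical call sites, not part of this patch):
+    //     assembler.linkJump(branch, target, JumpCondition, ConditionEQ);  // while assembling
+    //     ARM64Assembler::relinkJump(branchAddress, newTarget);            // on finalized code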
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister));
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister));
+    }
+
+    void linkJump(AssemblerLabel from, void* executableCode, AssemblerLabel to)
+    {
+        ASSERT(from.isSet());
+        ASSERT(to.isSet());
+        relinkJumpOrCall<false>(addressOf(from), addressOf(executableCode, from), addressOf(to));
+    }
+    
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+        relinkJumpOrCall<false>(addressOf(code, from), addressOf(code, from), to);
+    }
+
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+        linkJumpOrCall<true>(addressOf(code, from) - 1, addressOf(code, from) - 1, to);
+    }
+
+    static void linkPointer(void* code, AssemblerLabel where, void* valuePtr)
+    {
+        linkPointer(addressOf(code, where), valuePtr);
+    }
+
+    static void replaceWithJump(void* where, void* to)
+    {
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2;
+        ASSERT(static_cast<int>(offset) == offset);
+        int insn = unconditionalBranchImmediate(false, static_cast<int>(offset));
+        performJITMemcpy(where, &insn, sizeof(int));
+        cacheFlush(where, sizeof(int));
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return 4;
+    }
+
+    static constexpr ptrdiff_t patchableJumpSize()
+    {
+        return 4;
+    }
+    
+    static void replaceWithLoad(void* where)
+    {
+        Datasize sf;
+        AddOp op;
+        SetFlags S;
+        int shift;
+        int imm12;
+        RegisterID rn;
+        RegisterID rd;
+        if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) {
+            ASSERT(sf == Datasize_64);
+            ASSERT(op == AddOp_ADD);
+            ASSERT(!S);
+            ASSERT(!shift);
+            ASSERT(!(imm12 & ~0xff8));
+            int insn = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd);
+            performJITMemcpy(where, &insn, sizeof(int));
+            cacheFlush(where, sizeof(int));
+        }
+#if !ASSERT_DISABLED
+        else {
+            MemOpSize size;
+            bool V;
+            MemOp opc;
+            int imm12;
+            RegisterID rn;
+            RegisterID rt;
+            ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt));
+            ASSERT(size == MemOpSize_64);
+            ASSERT(!V);
+            ASSERT(opc == MemOp_LOAD);
+            ASSERT(!(imm12 & ~0x1ff));
+        }
+#endif
+    }
+
+    static void replaceWithAddressComputation(void* where)
+    {
+        MemOpSize size;
+        bool V;
+        MemOp opc;
+        int imm12;
+        RegisterID rn;
+        RegisterID rt;
+        if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) {
+            ASSERT(size == MemOpSize_64);
+            ASSERT(!V);
+            ASSERT(opc == MemOp_LOAD);
+            ASSERT(!(imm12 & ~0x1ff));
+            int insn = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt);
+            performJITMemcpy(where, &insn, sizeof(int));
+            cacheFlush(where, sizeof(int));
+        }
+#if !ASSERT_DISABLED
+        else {
+            Datasize sf;
+            AddOp op;
+            SetFlags S;
+            int shift;
+            int imm12;
+            RegisterID rn;
+            RegisterID rd;
+            ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd));
+            ASSERT(sf == Datasize_64);
+            ASSERT(op == AddOp_ADD);
+            ASSERT(!S);
+            ASSERT(!shift);
+            ASSERT(!(imm12 & ~0xff8));
+        }
+#endif
+    }
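+
+    // replaceWithLoad() and replaceWithAddressComputation() convert, in place,
+    // between "add dest, base, #offset" and "ldr dest, [base, #offset]". The
+    // load's 12-bit immediate is the byte offset divided by 8, which is why the
+    // add form is reconstructed as imm12 * sizeof(void*).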
+
+    static void repatchPointer(void* where, void* valuePtr)
+    {
+        linkPointer(static_cast<int*>(where), valuePtr, true);
+    }
+
+    static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush)
+    {
+        uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr);
+        int buffer[3];
+        buffer[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+        buffer[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        buffer[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd);
+        performJITMemcpy(address, buffer, sizeof(int) * 3);
+
+        if (flush)
+            cacheFlush(address, sizeof(int) * 3);
+    }
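+
+    // setPointer() materializes the pointer as a MOVZ/MOVK/MOVK triple covering
+    // halfwords 0-2, i.e. it assumes pointers fit in 48 bits; readPointer() below
+    // decodes the same three-instruction sequence.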
+
+    static void repatchInt32(void* where, int32_t value)
+    {
+        int* address = static_cast<int*>(where);
+
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rd;
+        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw);
+        ASSERT(checkMovk<Datasize_32>(address[1], 1, rd));
+
+        int buffer[2];
+        if (value >= 0) {
+            buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd);
+            buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        } else {
+            buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd);
+            buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd);
+        }
+        performJITMemcpy(where, &buffer, sizeof(int) * 2);
+
+        cacheFlush(where, sizeof(int) * 2);
+    }
+
+    static void* readPointer(void* where)
+    {
+        int* address = static_cast<int*>(where);
+
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rdFirst, rd;
+
+        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
+        uintptr_t result = imm16;
+
+        expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst);
+        result |= static_cast<uintptr_t>(imm16) << 16;
+
+        expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst);
+        result |= static_cast<uintptr_t>(imm16) << 32;
+
+        return reinterpret_cast<void*>(result);
+    }
+
+    static void* readCallTarget(void* from)
+    {
+        return readPointer(reinterpret_cast<int*>(from) - 4);
+    }
+
+    // The static relink, repatch, and replace methods can use |from| for
+    // both the write and executable address for call
+    // and jump patching as they're modifying existing (linked) code,
+    // so the address being provided is correct for relative address
+    // computation.
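+    // (By contrast, the member link() further below takes separate |from| and
+    // |fromInstruction| pointers: during branch compaction the writable copy of an
+    // instruction need not sit at its final executable address yet, so relative
+    // offsets are always computed against |fromInstruction|.)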
+    static void relinkJump(void* from, void* to)
+    {
+        relinkJumpOrCall<false>(reinterpret_cast<int*>(from), reinterpret_cast<const int*>(from), to);
+        cacheFlush(from, sizeof(int));
+    }
+    
+    static void relinkJumpToNop(void* from)
+    {
+        relinkJump(from, static_cast<char*>(from) + 4);
+    }
+
+    static void relinkCall(void* from, void* to)
+    {
+        relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, reinterpret_cast<const int*>(from) - 1, to);
+        cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int));
+    }
+    
+    static void repatchCompact(void* where, int32_t value)
+    {
+        ASSERT(!(value & ~0x3ff8));
+
+        MemOpSize size;
+        bool V;
+        MemOp opc;
+        int imm12;
+        RegisterID rn;
+        RegisterID rt;
+        bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt);
+        ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR.
+
+        if (size == MemOpSize_32)
+            imm12 = encodePositiveImmediate<32>(value);
+        else
+            imm12 = encodePositiveImmediate<64>(value);
+        int insn = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt);
+        performJITMemcpy(where, &insn, sizeof(int));
+
+        cacheFlush(where, sizeof(int));
+    }
+
+    unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+    {
+        __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end));
+    }
+#endif
+
+    static void cacheFlush(void* code, size_t size)
+    {
+#if OS(IOS)
+        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+        size_t page = pageSize();
+        uintptr_t current = reinterpret_cast<uintptr_t>(code);
+        uintptr_t end = current + size;
+        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+        if (end <= firstPageEnd) {
+            linuxPageFlush(current, end);
+            return;
+        }
+
+        linuxPageFlush(current, firstPageEnd);
+
+        for (current = firstPageEnd; current + page < end; current += page)
+            linuxPageFlush(current, current + page);
+
+        linuxPageFlush(current, end);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+    }
+
+    // Assembler admin methods:
+
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+    {
+        return a.from() < b.from();
+    }
+
+    static bool canCompact(JumpType jumpType)
+    {
+        // Fixed jumps cannot be compacted
+        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
+    }
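+
+    // Only the non-fixed-size jump kinds may be shrunk to their single-instruction
+    // direct forms; the FixedSize variants always keep their full width (see
+    // computeJumpType() below).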
+
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+    {
+        switch (jumpType) {
+        case JumpFixed:
+            return LinkInvalid;
+        case JumpNoConditionFixedSize:
+            return LinkJumpNoCondition;
+        case JumpConditionFixedSize:
+            return LinkJumpCondition;
+        case JumpCompareAndBranchFixedSize:
+            return LinkJumpCompareAndBranch;
+        case JumpTestBitFixedSize:
+            return LinkJumpTestBit;
+        case JumpNoCondition:
+            return LinkJumpNoCondition;
+        case JumpCondition: {
+            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
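+            // The shift pair below checks that the byte displacement sign-extends
+            // from 21 bits (64 - 43), i.e. that it fits the +/-1MB range of a
+            // B.cond with its 19-bit, 4-byte-scaled immediate field.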
+
+            if (((relative << 43) >> 43) == relative)
+                return LinkJumpConditionDirect;
+
+            return LinkJumpCondition;
+            }
+        case JumpCompareAndBranch:  {
+            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
+            if (((relative << 43) >> 43) == relative)
+                return LinkJumpCompareAndBranchDirect;
+
+            return LinkJumpCompareAndBranch;
+        }
+        case JumpTestBit:   {
+            ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3));
+            ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3));
+            intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from));
+
+            if (((relative << 50) >> 50) == relative)
+                return LinkJumpTestBitDirect;
+
+            return LinkJumpTestBit;
+        }
+        default:
+            ASSERT_NOT_REACHED();
+        }
+
+        return LinkJumpNoCondition;
+    }
+
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+    {
+        JumpLinkType linkType = computeJumpType(record.type(), from, to);
+        record.setLinkType(linkType);
+        return linkType;
+    }
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+    {
+        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+        return m_jumpsToLink;
+    }
+
+    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
+    {
+        const int* fromInstruction = reinterpret_cast<const int*>(fromInstruction8);
+        switch (record.linkType()) {
+        case LinkJumpNoCondition:
+            linkJumpOrCall<false>(reinterpret_cast<int*>(from), fromInstruction, to);
+            break;
+        case LinkJumpConditionDirect:
+            linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), fromInstruction, to);
+            break;
+        case LinkJumpCondition:
+            linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
+            break;
+        case LinkJumpCompareAndBranchDirect:
+            linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to);
+            break;
+        case LinkJumpCompareAndBranch:
+            linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
+            break;
+        case LinkJumpTestBitDirect:
+            linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to);
+            break;
+        case LinkJumpTestBit:
+            linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to);
+            break;
+        default:
+            ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+
+private:
+    template<Datasize size>
+    static bool checkMovk(int insn, int _hw, RegisterID _rd)
+    {
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rd;
+        bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);
+
+        return expected
+            && sf == size
+            && opc == MoveWideOp_K
+            && hw == _hw
+            && rd == _rd;
+    }
+
+    static void linkPointer(int* address, void* valuePtr, bool flush = false)
+    {
+        Datasize sf;
+        MoveWideOp opc;
+        int hw;
+        uint16_t imm16;
+        RegisterID rd;
+        bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd);
+        ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw);
+        ASSERT(checkMovk<Datasize_64>(address[1], 1, rd));
+        ASSERT(checkMovk<Datasize_64>(address[2], 2, rd));
+
+        setPointer(address, valuePtr, rd, flush);
+    }
+
+    template<bool isCall>
+    static void linkJumpOrCall(int* from, const int* fromInstruction, void* to)
+    {
+        bool link;
+        int imm26;
+        bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from);
+
+        ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop);
+        ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from));
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
+        ASSERT(static_cast<int>(offset) == offset);
+
+        int insn = unconditionalBranchImmediate(isCall, static_cast<int>(offset));
+        performJITMemcpy(from, &insn, sizeof(int));
+    }
+
+    template<bool isDirect>
+    static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
+        ASSERT(((offset << 38) >> 38) == offset);
+
+        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
+        ASSERT(!isDirect || useDirect);
+
+        if (useDirect || isDirect) {
+            int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            if (!isDirect) {
+                insn = nopPseudo();
+                performJITMemcpy(from + 1, &insn, sizeof(int));
+            }
+        } else {
+            int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
+        }
+    }
+
+    template<bool isDirect>
+    static void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
+        ASSERT(((offset << 38) >> 38) == offset);
+
+        bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits
+        ASSERT(!isDirect || useDirect);
+
+        if (useDirect || isDirect) {
+            int insn = conditionalBranchImmediate(static_cast<int>(offset), condition);
+            performJITMemcpy(from, &insn, sizeof(int));
+            if (!isDirect) {
+                insn = nopPseudo();
+                performJITMemcpy(from + 1, &insn, sizeof(int));
+            }
+        } else {
+            int insn = conditionalBranchImmediate(2, invert(condition));
+            performJITMemcpy(from, &insn, sizeof(int));
+            linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
+        }
+    }
+
+    template<bool isDirect>
+    static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 3));
+        intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2;
+        ASSERT(static_cast<int>(offset) == offset);
+        ASSERT(((offset << 38) >> 38) == offset);
+
+        bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits
+        ASSERT(!isDirect || useDirect);
+
+        if (useDirect || isDirect) {
+            int insn = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            if (!isDirect) {
+                insn = nopPseudo();
+                performJITMemcpy(from + 1, &insn, sizeof(int));
+            }
+        } else {
+            int insn = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt);
+            performJITMemcpy(from, &insn, sizeof(int));
+            linkJumpOrCall<false>(from + 1, fromInstruction + 1, to);
+        }
+    }
+
+    template<bool isCall>
+    static void relinkJumpOrCall(int* from, const int* fromInstruction, void* to)
+    {
+        if (!isCall && disassembleNop(from)) {
+            unsigned op01;
+            int imm19;
+            Condition condition;
+            bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition);
+
+            if (isConditionalBranchImmediate) {
+                ASSERT_UNUSED(op01, !op01);
+                ASSERT_UNUSED(isCall, !isCall);
+
+                if (imm19 == 8)
+                    condition = invert(condition);
+
+                linkConditionalBranch<false>(condition, from - 1, fromInstruction - 1, to);
+                return;
+            }
+
+            Datasize opSize;
+            bool op;
+            RegisterID rt;
+            bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt);
+
+            if (isCompareAndBranchImmediate) {
+                if (imm19 == 8)
+                    op = !op;
+
+                linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, fromInstruction - 1, to);
+                return;
+            }
+
+            int imm14;
+            unsigned bitNumber;
+            bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt);
+
+            if (isTestAndBranchImmediate) {
+                if (imm14 == 8)
+                    op = !op;
+
+                linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, fromInstruction - 1, to);
+                return;
+            }
+        }
+
+        linkJumpOrCall<isCall>(from, fromInstruction, to);
+    }
+
+    static int* addressOf(void* code, AssemblerLabel label)
+    {
+        return reinterpret_cast<int*>(static_cast<char*>(code) + label.m_offset);
+    }
+
+    int* addressOf(AssemblerLabel label)
+    {
+        return addressOf(m_buffer.data(), label);
+    }
+
+    static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
+    static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
+    static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
+
+    static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
+    {
+        int insn = *static_cast<int*>(address);
+        sf = static_cast<Datasize>((insn >> 31) & 1);
+        op = static_cast<AddOp>((insn >> 30) & 1);
+        S = static_cast<SetFlags>((insn >> 29) & 1);
+        shift = (insn >> 22) & 3;
+        imm12 = (insn >> 10) & 0x3ff;
+        rn = disassembleXOrSp((insn >> 5) & 0x1f);
+        rd = disassembleXOrZrOrSp(S, insn & 0x1f);
+        return (insn & 0x1f000000) == 0x11000000;
+    }
+
+    static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
+    {
+        int insn = *static_cast<int*>(address);
+        size = static_cast<MemOpSize>((insn >> 30) & 3);
+        V = (insn >> 26) & 1;
+        opc = static_cast<MemOp>((insn >> 22) & 3);
+        imm12 = (insn >> 10) & 0xfff;
+        rn = disassembleXOrSp((insn >> 5) & 0x1f);
+        rt = disassembleXOrZr(insn & 0x1f);
+        return (insn & 0x3b000000) == 0x39000000;
+    }
+
+    static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
+    {
+        int insn = *static_cast<int*>(address);
+        sf = static_cast<Datasize>((insn >> 31) & 1);
+        opc = static_cast<MoveWideOp>((insn >> 29) & 3);
+        hw = (insn >> 21) & 3;
+        imm16 = insn >> 5;
+        rd = disassembleXOrZr(insn & 0x1f);
+        return (insn & 0x1f800000) == 0x12800000;
+    }
+
+    static bool disassembleNop(void* address)
+    {
+        unsigned insn = *static_cast<unsigned*>(address);
+        return insn == 0xd503201f;
+    }
+
+    static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
+    {
+        int insn = *static_cast<int*>(address);
+        sf = static_cast<Datasize>((insn >> 31) & 1);
+        op = (insn >> 24) & 0x1;
+        imm19 = (insn << 8) >> 13;
+        rt = static_cast<RegisterID>(insn & 0x1f);
+        return (insn & 0x7e000000) == 0x34000000;
+        
+    }
+
+    static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
+    {
+        int insn = *static_cast<int*>(address);
+        op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
+        imm19 = (insn << 8) >> 13;
+        condition = static_cast<Condition>(insn & 0xf);
+        return (insn & 0xfe000000) == 0x54000000;
+    }
+
+    static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
+    {
+        int insn = *static_cast<int*>(address);
+        op = (insn >> 24) & 0x1;
+        imm14 = (insn << 13) >> 18;
+        bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
+        rt = static_cast<RegisterID>(insn & 0x1f);
+        return (insn & 0x7e000000) == 0x36000000;
+        
+    }
+
+    static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
+    {
+        int insn = *static_cast<int*>(address);
+        op = (insn >> 31) & 1;
+        imm26 = (insn << 6) >> 6;
+        return (insn & 0x7c000000) == 0x14000000;
+    }
+
+    static int xOrSp(RegisterID reg)
+    {
+        ASSERT(!isZr(reg));
+        ASSERT(!isIOS() || reg != ARM64Registers::x18);
+        return reg;
+    }
+    static int xOrZr(RegisterID reg)
+    {
+        ASSERT(!isSp(reg));
+        ASSERT(!isIOS() || reg != ARM64Registers::x18);
+        return reg & 31;
+    }
+    static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); }
+    static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); }
+
+    ALWAYS_INLINE void insn(int instruction)
+    {
+        m_buffer.putInt(instruction);
+    }
+
+    ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(imm3 < 5);
+        // The only allocated value for opt is 0.
+        const int opt = 0;
+        return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
+    }
+
+    ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(shift < 2);
+        ASSERT(isUInt12(imm12));
+        return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd));
+    }
+
+    ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(shift < 3);
+        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
+        return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd)
+    {
+        const int opcode2 = 0;
+        return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(immr < (sf ? 64 : 32));
+        ASSERT(imms < (sf ? 64 : 32));
+        const int N = sf;
+        return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    // 'op' means negate
+    ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt)
+    {
+        ASSERT(imm19 == (imm19 << 13) >> 13);
+        return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt));
+    }
+
+    ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond)
+    {
+        ASSERT(imm19 == (imm19 << 13) >> 13);
+        ASSERT(!(cond & ~15));
+        // The only allocated values for o1 & o0 are 0.
+        const int o1 = 0;
+        const int o0 = 0;
+        return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond);
+    }
+
+    ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv)
+    {
+        ASSERT(!(imm5 & ~0x1f));
+        ASSERT(nzcv < 16);
+        const int S = 1;
+        const int o2 = 0;
+        const int o3 = 0;
+        return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
+    }
+
+    ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv)
+    {
+        ASSERT(nzcv < 16);
+        const int S = 1;
+        const int o2 = 0;
+        const int o3 = 0;
+        return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv);
+    }
+
+    // 'op' means negate
+    // 'op2' means increment
+    ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd)
+    {
+        const int S = 0;
+        return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd)
+    {
+        const int S = 0;
+        const int opcode2 = 0;
+        return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd)
+    {
+        const int S = 0;
+        return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd)
+    {
+        int op54 = opcode >> 4;
+        int op31 = (opcode >> 1) & 7;
+        int op0 = opcode & 1;
+        return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL)
+    {
+        ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4)));
+        const int op2 = 0;
+        return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL);
+    }
+
+    ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(imms < (sf ? 64 : 32));
+        const int op21 = 0;
+        const int N = sf;
+        const int o0 = 0;
+        return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2)
+    {
+        const int M = 0;
+        const int S = 0;
+        const int op = 0;
+        return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2);
+    }
+
+    ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv)
+    {
+        ASSERT(nzcv < 16);
+        const int M = 0;
+        const int S = 0;
+        return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv);
+    }
+
+    ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        const int imm5 = 0;
+        return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int S = 0;
+        return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd)
+    {
+        return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd));
+    }
+
+    ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd)
+    {
+        return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd);
+    }
+
+    ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, unsigned size, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd)
+    {
+        const int Q = 0;
+        return (0xe201c00 | Q << 30 | size << 22 | vm << 16 | opcode << 11 | vn << 5 | vd);
+    }
+
+    ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd)
+    {
+        return vectorDataProcessing2Source(opcode, 0, vm, vn, vd);
+    }
+
+    // 'o1' means negate
+    ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd)
+    {
+        const int M = 0;
+        const int S = 0;
+        return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd);
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, FPRegisterID rt)
+    {
+        ASSERT(((imm19 << 13) >> 13) == imm19);
+        return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt)
+    {
+        return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isInt9(imm9));
+        return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isInt9(imm9));
+        return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairOffset(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x29000000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairOffset(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairOffset(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterPairNonTemporal(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+    {
+        ASSERT(size < 3);
+        ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size.
+        ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed.
+        unsigned immedShiftAmount = memPairOffsetShift(V, size);
+        int imm7 = immediate >> immedShiftAmount;
+        ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
+        return (0x28000000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterPairNonTemporal(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+    {
+        return loadStoreRegisterPairNonTemporal(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+    }
+
+    // 'V' means vector
+    // 'S' means shift rm
+    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit.
+        return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isInt9(imm9));
+        return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt)
+    {
+        ASSERT(isInt9(imm9));
+        return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
+    }
+
+    // 'V' means vector
+    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt)
+    {
+        ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits.
+        ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits.
+        ASSERT(isUInt12(imm12));
+        return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt);
+    }
+
+    ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt)
+    {
+        return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt));
+    }
+
+    ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff)));
+        return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd));
+    }
+
+    // 'N' means negate rm
+    ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd)
+    {
+        ASSERT(!(imm6 & (sf ? ~63 : ~31)));
+        return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd)
+    {
+        ASSERT(hw < (sf ? 4 : 2));
+        return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd));
+    }
+
+    // 'op' means link
+    ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26)
+    {
+        ASSERT(imm26 == (imm26 << 6) >> 6);
+        return (0x14000000 | op << 31 | (imm26 & 0x3ffffff));
+    }
+
+    // 'op' means page
+    ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd)
+    {
+        ASSERT(imm21 == (imm21 << 11) >> 11);
+        int32_t immlo = imm21 & 3;
+        int32_t immhi = (imm21 >> 2) & 0x7ffff;
+        return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd));
+    }
+
+    ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt)
+    {
+        return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt));
+    }
+
+    ALWAYS_INLINE static int hintPseudo(int imm)
+    {
+        ASSERT(!(imm & ~0x7f));
+        return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr);
+    }
+
+    ALWAYS_INLINE static int nopPseudo()
+    {
+        return hintPseudo(0);
+    }
+    
+    // 'op' means negate
+    ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt)
+    {
+        ASSERT(!(b50 & ~0x3f));
+        ASSERT(imm14 == (imm14 << 18) >> 18);
+        int b5 = b50 >> 5;
+        int b40 = b50 & 0x1f;
+        return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt));
+    }
+
+    ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn)
+    {
+        // The only allocated value for op2 is 0x1f; for op3 & op4 it is 0.
+        const int op2 = 0x1f;
+        const int op3 = 0;
+        const int op4 = 0;
+        return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
+    }
+
+    // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the
+    // last instruction in the buffer is a load, store or prefetch. Needed
+    // before 64-bit multiply-accumulate instructions.
+    template<int datasize>
+    ALWAYS_INLINE void nopCortexA53Fix835769()
+    {
+#if CPU(ARM64_CORTEXA53)
+        CHECK_DATASIZE();
+        if (datasize == 64) {
+            if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) {
+                // From ARMv8 Reference Manual, Section C4.1: the encoding of the
+                // instructions in the Loads and stores instruction group is:
+                // ---- 1-0- ---- ---- ---- ---- ---- ----
+                if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000))
+                    nop();
+            }
+        }
+#endif
+    }
+
+    // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid
+    // wrong address access after ADRP instruction.
+    ALWAYS_INLINE void nopCortexA53Fix843419()
+    {
+#if CPU(ARM64_CORTEXA53)
+        nop();
+        nop();
+        nop();
+#endif
+    }
+
+    AssemblerBuffer m_buffer;
+    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#undef CHECK_DATASIZE_OF
+#undef DATASIZE_OF
+#undef MEMOPSIZE_OF
+#undef CHECK_DATASIZE
+#undef DATASIZE
+#undef MEMOPSIZE
+#undef CHECK_FP_MEMOP_DATASIZE
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
diff --git a/assembler/ARMAssembler.cpp b/assembler/ARMAssembler.cpp
new file mode 100644
index 0000000..552f37f
--- /dev/null
+++ b/assembler/ARMAssembler.cpp
@@ -0,0 +1,423 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
+    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+    ARMWord index = (*ldr & 0xfff) >> 1;
+
+    ASSERT(diff >= 1);
+    if (diff >= 2 || index > 0) {
+        diff = (diff + index - 2) * sizeof(ARMWord);
+        ASSERT(diff <= 0xfff);
+        *ldr = (*ldr & ~0xfff) | diff;
+    } else
+        *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+    int rol;
+
+    if (imm <= 0xff)
+        return Op2Immediate | imm;
+
+    if ((imm & 0xff000000) == 0) {
+        imm <<= 8;
+        rol = 8;
+    }
+    else {
+        imm = (imm << 24) | (imm >> 8);
+        rol = 0;
+    }
+
+    if ((imm & 0xff000000) == 0) {
+        imm <<= 8;
+        rol += 4;
+    }
+
+    if ((imm & 0xf0000000) == 0) {
+        imm <<= 4;
+        rol += 2;
+    }
+
+    if ((imm & 0xc0000000) == 0) {
+        imm <<= 2;
+        rol += 1;
+    }
+
+    if ((imm & 0x00ffffff) == 0)
+        return Op2Immediate | (imm >> 24) | (rol << 8);
+
+    return InvalidImmediate;
+}
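+
+// Illustrative examples (not in the original source): getOp2(0x3FC00) succeeds,
+// since 0x3FC00 is 0xFF rotated right by 22 bits, and encodes it as
+// Op2Immediate | 0xFF | (11 << 8) (the rotate field stores rotation / 2).
+// A value such as 0x101 has no rotated 8-bit form and yields InvalidImmediate.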
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+    // Step 1: Search for a non-immediate part
+    ARMWord mask;
+    ARMWord imm1;
+    ARMWord imm2;
+    int rol;
+
+    mask = 0xff000000;
+    rol = 8;
+    while(1) {
+        if ((imm & mask) == 0) {
+            imm = (imm << rol) | (imm >> (32 - rol));
+            rol = 4 + (rol >> 1);
+            break;
+        }
+        rol += 2;
+        mask >>= 2;
+        if (mask & 0x3) {
+            // rol 8
+            imm = (imm << 8) | (imm >> 24);
+            mask = 0xff00;
+            rol = 24;
+            while (1) {
+                if ((imm & mask) == 0) {
+                    imm = (imm << rol) | (imm >> (32 - rol));
+                    rol = (rol >> 1) - 8;
+                    break;
+                }
+                rol += 2;
+                mask >>= 2;
+                if (mask & 0x3)
+                    return 0;
+            }
+            break;
+        }
+    }
+
+    ASSERT((imm & 0xff) == 0);
+
+    if ((imm & 0xff000000) == 0) {
+        imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+        imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+    } else if (imm & 0xc0000000) {
+        imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+        imm <<= 8;
+        rol += 4;
+
+        if ((imm & 0xff000000) == 0) {
+            imm <<= 8;
+            rol += 4;
+        }
+
+        if ((imm & 0xf0000000) == 0) {
+            imm <<= 4;
+            rol += 2;
+        }
+
+        if ((imm & 0xc0000000) == 0) {
+            imm <<= 2;
+            rol += 1;
+        }
+
+        if ((imm & 0x00ffffff) == 0)
+            imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+        else
+            return 0;
+    } else {
+        if ((imm & 0xf0000000) == 0) {
+            imm <<= 4;
+            rol += 2;
+        }
+
+        if ((imm & 0xc0000000) == 0) {
+            imm <<= 2;
+            rol += 1;
+        }
+
+        imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+        imm <<= 8;
+        rol += 4;
+
+        if ((imm & 0xf0000000) == 0) {
+            imm <<= 4;
+            rol += 2;
+        }
+
+        if ((imm & 0xc0000000) == 0) {
+            imm <<= 2;
+            rol += 1;
+        }
+
+        if ((imm & 0x00ffffff) == 0)
+            imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+        else
+            return 0;
+    }
+
+    if (positive) {
+        mov(reg, imm1);
+        orr(reg, reg, imm2);
+    } else {
+        mvn(reg, imm1);
+        bic(reg, reg, imm2);
+    }
+
+    return 1;
+}
+
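+// Returns an Operand2 encoding of imm if one exists (optionally inverted), otherwise
+// materializes the constant into tmpReg and returns the register operand.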
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+    ARMWord tmp;
+
+    // Try to do it with a single instruction.
+    tmp = getOp2(imm);
+    if (tmp != InvalidImmediate)
+        return tmp;
+
+    tmp = getOp2(~imm);
+    if (tmp != InvalidImmediate) {
+        if (invert)
+            return tmp | Op2InvertedImmediate;
+        mvn(tmpReg, tmp);
+        return tmpReg;
+    }
+
+    return encodeComplexImm(imm, tmpReg);
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+    ARMWord tmp;
+
+    // Try to do it with a single instruction.
+    tmp = getOp2(imm);
+    if (tmp != InvalidImmediate) {
+        mov(dest, tmp);
+        return;
+    }
+
+    tmp = getOp2(~imm);
+    if (tmp != InvalidImmediate) {
+        mvn(dest, tmp);
+        return;
+    }
+
+    encodeComplexImm(imm, dest);
+}
+
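+// On ARMv7 a 32-bit constant is built with a movw/movt pair; on older cores we first try
+// the two-instruction sequence and fall back to a literal-pool load.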
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+#if WTF_ARM_ARCH_AT_LEAST(7)
+    ARMWord tmp = getImm16Op2(imm);
+    if (tmp != InvalidImmediate) {
+        movw(dest, tmp);
+        return dest;
+    }
+    movw(dest, getImm16Op2(imm & 0xffff));
+    movt(dest, getImm16Op2(imm >> 16));
+    return dest;
+#else
+    // Do it with two instructions.
+    if (genInt(dest, imm, true))
+        return dest;
+    if (genInt(dest, ~imm, false))
+        return dest;
+
+    ldrImmediate(dest, imm);
+    return dest;
+#endif
+}
+
+// Memory load/store helpers
+
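+// Word and byte transfers take a 12-bit unsigned offset. Larger offsets are split: the
+// upper bits are added into the scratch register S0 first, and offsets beyond 20 bits are
+// materialized with moveImm() and used as a register offset.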
+void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+    if (offset >= 0) {
+        if (offset <= 0xfff)
+            dtrUp(transferType, srcDst, base, offset);
+        else if (offset <= 0xfffff) {
+            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
+            dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
+        } else {
+            moveImm(offset, ARMRegisters::S0);
+            dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+        }
+    } else {
+        if (offset >= -0xfff)
+            dtrDown(transferType, srcDst, base, -offset);
+        else if (offset >= -0xfffff) {
+            sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
+            dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
+        } else {
+            moveImm(offset, ARMRegisters::S0);
+            dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+        }
+    }
+}
+
+void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+    ASSERT(scale >= 0 && scale <= 3);
+    ARMWord op2 = lsl(index, scale);
+
+    if (!offset) {
+        dtrUpRegister(transferType, srcDst, base, op2);
+        return;
+    }
+
+    if (offset <= 0xfffff && offset >= -0xfffff) {
+        add(ARMRegisters::S0, base, op2);
+        dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
+        return;
+    }
+
+    moveImm(offset, ARMRegisters::S0);
+    add(ARMRegisters::S0, ARMRegisters::S0, op2);
+    dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+    if (offset >= 0) {
+        if (offset <= 0xff)
+            halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
+        else if (offset <= 0xffff) {
+            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
+            halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
+        } else {
+            moveImm(offset, ARMRegisters::S0);
+            halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+        }
+    } else {
+        if (offset >= -0xff)
+            halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
+        else if (offset >= -0xffff) {
+            sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
+            halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
+        } else {
+            moveImm(offset, ARMRegisters::S0);
+            halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+        }
+    }
+}
+
+void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+    if (!scale && !offset) {
+        halfDtrUpRegister(transferType, srcDst, base, index);
+        return;
+    }
+
+    ARMWord op2 = lsl(index, scale);
+
+    if (offset <= 0xffff && offset >= -0xffff) {
+        add(ARMRegisters::S0, base, op2);
+        dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
+        return;
+    }
+
+    moveImm(offset, ARMRegisters::S0);
+    add(ARMRegisters::S0, ARMRegisters::S0, op2);
+    halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+    // VFP cannot directly access memory that is not four-byte-aligned
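+    // VFP offsets are 8-bit word counts, so only about +/-1020 bytes can be reached directly.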
+    if (!(offset & 0x3)) {
+        if (offset <= 0x3ff && offset >= 0) {
+            doubleDtrUp(transferType, srcDst, base, offset >> 2);
+            return;
+        }
+        if (offset <= 0x3ffff && offset >= 0) {
+            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+            doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+            return;
+        }
+        offset = -offset;
+
+        if (offset <= 0x3ff && offset >= 0) {
+            doubleDtrDown(transferType, srcDst, base, offset >> 2);
+            return;
+        }
+        if (offset <= 0x3ffff && offset >= 0) {
+            sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+            doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+            return;
+        }
+        offset = -offset;
+    }
+
+    moveImm(offset, ARMRegisters::S0);
+    add(ARMRegisters::S0, ARMRegisters::S0, base);
+    doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
+}
+
+void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+    add(ARMRegisters::S1, base, lsl(index, scale));
+    dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
+}
+
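+// Resolves the recorded jumps just before the buffer is copied to executable memory: near
+// targets are rewritten into direct B instructions, while the rest keep an absolute
+// address rebased against the destination of the copy.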
+void ARMAssembler::prepareExecutableCopy(void* to)
+{
+    // 64-bit alignment is required for the next constant pool and the JIT code as well.
+    m_buffer.flushWithoutBarrier(true);
+
+    char* data = reinterpret_cast<char*>(m_buffer.data());
+    ptrdiff_t delta = reinterpret_cast<char*>(to) - data;
+
+    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+        // The last bit is set if the constant must be placed on constant pool.
+        int pos = (iter->m_offset) & (~0x1);
+        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
+        ARMWord* addr = getLdrImmAddress(ldrAddr);
+        if (*addr != InvalidBranchTarget) {
+            if (!(iter->m_offset & 1)) {
+                intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);
+
+                if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+                    *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
+                    continue;
+                }
+            }
+            *addr = reinterpret_cast<ARMWord>(data + delta + *addr);
+        }
+    }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/assembler/ARMAssembler.h b/assembler/ARMAssembler.h
new file mode 100644
index 0000000..6fba9ed
--- /dev/null
+++ b/assembler/ARMAssembler.h
@@ -0,0 +1,1212 @@
+/*
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+    typedef uint32_t ARMWord;
+
+    #define FOR_EACH_CPU_REGISTER(V) \
+        FOR_EACH_CPU_GPREGISTER(V) \
+        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        FOR_EACH_CPU_FPREGISTER(V)
+
+    #define FOR_EACH_CPU_GPREGISTER(V) \
+        V(void*, r0) \
+        V(void*, r1) \
+        V(void*, r2) \
+        V(void*, r3) \
+        V(void*, r4) \
+        V(void*, r5) \
+        V(void*, r6) \
+        V(void*, r7) \
+        V(void*, r8) \
+        V(void*, r9) \
+        V(void*, r10) \
+        V(void*, fp) \
+        V(void*, ip) \
+        V(void*, sp) \
+        V(void*, lr) \
+        V(void*, pc) \
+
+    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        V(void*, apsr) \
+        V(void*, fpscr) \
+
+    #define FOR_EACH_CPU_FPREGISTER(V) \
+        V(double, d0) \
+        V(double, d1) \
+        V(double, d2) \
+        V(double, d3) \
+        V(double, d4) \
+        V(double, d5) \
+        V(double, d6) \
+        V(double, d7) \
+        V(double, d8) \
+        V(double, d9) \
+        V(double, d10) \
+        V(double, d11) \
+        V(double, d12) \
+        V(double, d13) \
+        V(double, d14) \
+        V(double, d15) \
+        V(double, d16) \
+        V(double, d17) \
+        V(double, d18) \
+        V(double, d19) \
+        V(double, d20) \
+        V(double, d21) \
+        V(double, d22) \
+        V(double, d23) \
+        V(double, d24) \
+        V(double, d25) \
+        V(double, d26) \
+        V(double, d27) \
+        V(double, d28) \
+        V(double, d29) \
+        V(double, d30) \
+        V(double, d31) \
+
+    namespace ARMRegisters {
+
+        typedef enum {
+            #define DECLARE_REGISTER(_type, _regName) _regName,
+            FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+            #undef DECLARE_REGISTER
+
+            // Pseudonyms for some of the registers.
+            S0 = r6,
+            r11 = fp, // frame pointer
+            r12 = ip, S1 = ip,
+            r13 = sp,
+            r14 = lr,
+            r15 = pc
+        } RegisterID;
+
+        typedef enum {
+            #define DECLARE_REGISTER(_type, _regName) _regName,
+            FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+            #undef DECLARE_REGISTER
+
+            // Pseudonyms for some of the registers.
+            SD0 = d7, /* Same as thumb assembler. */
+        } FPRegisterID;
+
+    } // namespace ARMRegisters
+
+    class ARMAssembler {
+    public:
+        typedef ARMRegisters::RegisterID RegisterID;
+        typedef ARMRegisters::FPRegisterID FPRegisterID;
+        typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+        typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+        ARMAssembler()
+            : m_indexOfTailOfLastWatchpoint(1)
+        {
+        }
+
+        ARMBuffer& buffer() { return m_buffer; }
+
+        static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
+        static constexpr RegisterID lastRegister() { return ARMRegisters::r15; }
+
+        static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+        static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+
+        // ARM conditional constants
+        typedef enum {
+            EQ = 0x00000000, // Zero / Equal.
+            NE = 0x10000000, // Non-zero / Not equal.
+            CS = 0x20000000, // Unsigned higher or same.
+            CC = 0x30000000, // Unsigned lower.
+            MI = 0x40000000, // Negative.
+            PL = 0x50000000, // Positive or zero.
+            VS = 0x60000000, // Overflowed.
+            VC = 0x70000000, // Not overflowed.
+            HI = 0x80000000, // Unsigned higher.
+            LS = 0x90000000, // Unsigned lower or same.
+            GE = 0xa0000000, // Signed greater than or equal.
+            LT = 0xb0000000, // Signed less than.
+            GT = 0xc0000000, // Signed greater than.
+            LE = 0xd0000000, // Signed less than or equal.
+            AL = 0xe0000000  // Unconditional / Always execute.
+        } Condition;
+
+        // ARM instruction constants
+        enum {
+            AND = (0x0 << 21),
+            EOR = (0x1 << 21),
+            SUB = (0x2 << 21),
+            RSB = (0x3 << 21),
+            ADD = (0x4 << 21),
+            ADC = (0x5 << 21),
+            SBC = (0x6 << 21),
+            RSC = (0x7 << 21),
+            TST = (0x8 << 21),
+            TEQ = (0x9 << 21),
+            CMP = (0xa << 21),
+            CMN = (0xb << 21),
+            ORR = (0xc << 21),
+            MOV = (0xd << 21),
+            BIC = (0xe << 21),
+            MVN = (0xf << 21),
+            MUL = 0x00000090,
+            MULL = 0x00c00090,
+            VMOV_F64 = 0x0eb00b40,
+            VADD_F64 = 0x0e300b00,
+            VDIV_F64 = 0x0e800b00,
+            VSUB_F64 = 0x0e300b40,
+            VMUL_F64 = 0x0e200b00,
+            VCMP_F64 = 0x0eb40b40,
+            VSQRT_F64 = 0x0eb10bc0,
+            VABS_F64 = 0x0eb00bc0,
+            VNEG_F64 = 0x0eb10b40,
+            STMDB = 0x09200000,
+            LDMIA = 0x08b00000,
+            B = 0x0a000000,
+            BL = 0x0b000000,
+            BX = 0x012fff10,
+            VMOV_VFP64 = 0x0c400a10,
+            VMOV_ARM64 = 0x0c500a10,
+            VMOV_VFP32 = 0x0e000a10,
+            VMOV_ARM32 = 0x0e100a10,
+            VCVT_F64_S32 = 0x0eb80bc0,
+            VCVT_S32_F64 = 0x0ebd0bc0,
+            VCVT_U32_F64 = 0x0ebc0bc0,
+            VCVT_F32_F64 = 0x0eb70bc0,
+            VCVT_F64_F32 = 0x0eb70ac0,
+            VMRS_APSR = 0x0ef1fa10,
+            CLZ = 0x016f0f10,
+            BKPT = 0xe1200070,
+            BLX = 0x012fff30,
+#if WTF_ARM_ARCH_AT_LEAST(7)
+            MOVW = 0x03000000,
+            MOVT = 0x03400000,
+#endif
+            NOP = 0xe1a00000,
+            DMB_SY = 0xf57ff05f,
+            DMB_ISHST = 0xf57ff05a,
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+            SDIV = 0x0710f010,
+            UDIV = 0x0730f010,
+#endif
+        };
+
+        enum {
+            Op2Immediate = (1 << 25),
+            ImmediateForHalfWordTransfer = (1 << 22),
+            Op2InvertedImmediate = (1 << 26),
+            SetConditionalCodes = (1 << 20),
+            Op2IsRegisterArgument = (1 << 25),
+            // Data transfer flags.
+            DataTransferUp = (1 << 23),
+            DataTransferWriteBack = (1 << 21),
+            DataTransferPostUpdate = (1 << 24),
+            DataTransferLoad = (1 << 20),
+            ByteDataTransfer = (1 << 22),
+        };
+
+        enum DataTransferTypeA {
+            LoadUint32 = 0x05000000 | DataTransferLoad,
+            LoadUint8 = 0x05400000 | DataTransferLoad,
+            StoreUint32 = 0x05000000,
+            StoreUint8 = 0x05400000,
+        };
+
+        enum DataTransferTypeB {
+            LoadUint16 = 0x010000b0 | DataTransferLoad,
+            LoadInt16 = 0x010000f0 | DataTransferLoad,
+            LoadInt8 = 0x010000d0 | DataTransferLoad,
+            StoreUint16 = 0x010000b0,
+        };
+
+        enum DataTransferTypeFloat {
+            LoadFloat = 0x0d000a00 | DataTransferLoad,
+            LoadDouble = 0x0d000b00 | DataTransferLoad,
+            StoreFloat = 0x0d000a00,
+            StoreDouble = 0x0d000b00,
+        };
+
+        // Masks of ARM instructions
+        enum {
+            BranchOffsetMask = 0x00ffffff,
+            ConditionalFieldMask = 0xf0000000,
+            DataTransferOffsetMask = 0xfff,
+        };
+
+        enum {
+            MinimumBranchOffsetDistance = -0x00800000,
+            MaximumBranchOffsetDistance = 0x007fffff,
+        };
+
+        enum {
+            padForAlign8  = 0x00,
+            padForAlign16 = 0x0000,
+            padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
+        };
+
+        static const ARMWord InvalidImmediate = 0xf0000000;
+        static const ARMWord InvalidBranchTarget = 0xffffffff;
+        static const int DefaultPrefetchOffset = 2;
+
+        static const ARMWord BlxInstructionMask = 0x012fff30;
+        static const ARMWord LdrOrAddInstructionMask = 0x0ff00000;
+        static const ARMWord LdrPcImmediateInstructionMask = 0x0f7f0000;
+
+        static const ARMWord AddImmediateInstruction = 0x02800000;
+        static const ARMWord BlxInstruction = 0x012fff30;
+        static const ARMWord LdrImmediateInstruction = 0x05900000;
+        static const ARMWord LdrPcImmediateInstruction = 0x051f0000;
+
+        // Instruction formatting
+
+        void emitInstruction(ARMWord op, int rd, int rn, ARMWord op2)
+        {
+            ASSERT(((op2 & ~Op2Immediate) <= 0xfff) || (((op2 & ~ImmediateForHalfWordTransfer) <= 0xfff)));
+            m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+        }
+
+        void emitDoublePrecisionInstruction(ARMWord op, int dd, int dn, int dm)
+        {
+            ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
+            m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
+                               | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
+                               | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
+        }
+
+        void emitSinglePrecisionInstruction(ARMWord op, int sd, int sn, int sm)
+        {
+            ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
+            m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
+                               | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
+                               | (sm >> 1) | ((sm & 0x1) << 5));
+        }
+
+        void bitAnd(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | AND, rd, rn, op2);
+        }
+
+        void bitAnds(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | AND | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void eor(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | EOR, rd, rn, op2);
+        }
+
+        void eors(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | EOR | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void sub(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | SUB, rd, rn, op2);
+        }
+
+        void subs(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | SUB | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void rsb(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | RSB, rd, rn, op2);
+        }
+
+        void rsbs(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | RSB | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void add(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | ADD, rd, rn, op2);
+        }
+
+        void adds(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | ADD | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void adc(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | ADC, rd, rn, op2);
+        }
+
+        void adcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | ADC | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void sbc(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | SBC, rd, rn, op2);
+        }
+
+        void sbcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | SBC | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void rsc(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | RSC, rd, rn, op2);
+        }
+
+        void rscs(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | RSC | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void tst(int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | TST | SetConditionalCodes, 0, rn, op2);
+        }
+
+        void teq(int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | TEQ | SetConditionalCodes, 0, rn, op2);
+        }
+
+        void cmp(int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | CMP | SetConditionalCodes, 0, rn, op2);
+        }
+
+        void cmn(int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | CMN | SetConditionalCodes, 0, rn, op2);
+        }
+
+        void orr(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | ORR, rd, rn, op2);
+        }
+
+        void orrs(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | ORR | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void mov(int rd, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | MOV, rd, ARMRegisters::r0, op2);
+        }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+        void movw(int rd, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT((op2 | 0xf0fff) == 0xf0fff);
+            m_buffer.putInt(toARMWord(cc) | MOVW | RD(rd) | op2);
+        }
+
+        void movt(int rd, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT((op2 | 0xf0fff) == 0xf0fff);
+            m_buffer.putInt(toARMWord(cc) | MOVT | RD(rd) | op2);
+        }
+#endif
+
+        void movs(int rd, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | MOV | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+        }
+
+        void bic(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | BIC, rd, rn, op2);
+        }
+
+        void bics(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | BIC | SetConditionalCodes, rd, rn, op2);
+        }
+
+        void mvn(int rd, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | MVN, rd, ARMRegisters::r0, op2);
+        }
+
+        void mvns(int rd, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | MVN | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+        }
+
+        void mul(int rd, int rn, int rm, Condition cc = AL)
+        {
+            m_buffer.putInt(toARMWord(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+        }
+
+        void muls(int rd, int rn, int rm, Condition cc = AL)
+        {
+            m_buffer.putInt(toARMWord(cc) | MUL | SetConditionalCodes | RN(rd) | RS(rn) | RM(rm));
+        }
+
+        void mull(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+        {
+            m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+        }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+        template<int datasize>
+        void sdiv(int rd, int rn, int rm, Condition cc = AL)
+        {
+            static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
+            ASSERT(rd != ARMRegisters::pc);
+            ASSERT(rn != ARMRegisters::pc);
+            ASSERT(rm != ARMRegisters::pc);
+            m_buffer.putInt(toARMWord(cc) | SDIV | RN(rd) | RM(rn) | RS(rm));
+        }
+
+        void udiv(int rd, int rn, int rm, Condition cc = AL)
+        {
+            ASSERT(rd != ARMRegisters::pc);
+            ASSERT(rn != ARMRegisters::pc);
+            ASSERT(rm != ARMRegisters::pc);
+            m_buffer.putInt(toARMWord(cc) | UDIV | RN(rd) | RM(rn) | RS(rm));
+        }
+#endif
+
+        void vmov_f64(int dd, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
+        }
+
+        void vadd_f64(int dd, int dn, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VADD_F64, dd, dn, dm);
+        }
+
+        void vdiv_f64(int dd, int dn, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VDIV_F64, dd, dn, dm);
+        }
+
+        void vsub_f64(int dd, int dn, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VSUB_F64, dd, dn, dm);
+        }
+
+        void vmul_f64(int dd, int dn, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VMUL_F64, dd, dn, dm);
+        }
+
+        void vcmp_f64(int dd, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VCMP_F64, dd, 0, dm);
+        }
+
+        void vsqrt_f64(int dd, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VSQRT_F64, dd, 0, dm);
+        }
+
+        void vabs_f64(int dd, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VABS_F64, dd, 0, dm);
+        }
+
+        void vneg_f64(int dd, int dm, Condition cc = AL)
+        {
+            emitDoublePrecisionInstruction(toARMWord(cc) | VNEG_F64, dd, 0, dm);
+        }
+
+        void ldrImmediate(int rd, ARMWord imm, Condition cc = AL)
+        {
+            m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm, true);
+        }
+
+        void ldrUniqueImmediate(int rd, ARMWord imm, Condition cc = AL)
+        {
+            m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm);
+        }
+
+        void dtrUp(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+        }
+
+        void dtrUpRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType | DataTransferUp | Op2IsRegisterArgument, rd, rb, rm);
+        }
+
+        void dtrDown(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+        }
+
+        void dtrDownRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType | Op2IsRegisterArgument, rd, rb, rm);
+        }
+
+        void halfDtrUp(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+        }
+
+        void halfDtrUpRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rn, rm);
+        }
+
+        void halfDtrDown(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+        }
+
+        void halfDtrDownRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | transferType, rd, rn, rm);
+        }
+
+        void doubleDtrUp(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT(op2 <= 0xff && rd <= 15);
+            /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+            m_buffer.putInt(toARMWord(cc) | DataTransferUp | type | (rd << 12) | RN(rb) | op2);
+        }
+
+        void doubleDtrDown(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT(op2 <= 0xff && rd <= 15);
+            /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+            m_buffer.putInt(toARMWord(cc) | type | (rd << 12) | RN(rb) | op2);
+        }
+
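+        // push emits "str reg, [sp, #-4]!"; pop emits "ldr reg, [sp], #4".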
+        void push(int reg, Condition cc = AL)
+        {
+            ASSERT(ARMWord(reg) <= 0xf);
+            m_buffer.putInt(toARMWord(cc) | StoreUint32 | DataTransferWriteBack | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+        }
+
+        void pop(int reg, Condition cc = AL)
+        {
+            ASSERT(ARMWord(reg) <= 0xf);
+            m_buffer.putInt(toARMWord(cc) | (LoadUint32 ^ DataTransferPostUpdate) | DataTransferUp | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+        }
+
+        inline void poke(int reg, Condition cc = AL)
+        {
+            dtrDown(StoreUint32, ARMRegisters::sp, 0, reg, cc);
+        }
+
+        inline void peek(int reg, Condition cc = AL)
+        {
+            dtrUp(LoadUint32, reg, ARMRegisters::sp, 0, cc);
+        }
+
+        void vmov_vfp64(int sm, int rt, int rt2, Condition cc = AL)
+        {
+            ASSERT(rt != rt2);
+            m_buffer.putInt(toARMWord(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+        }
+
+        void vmov_arm64(int rt, int rt2, int sm, Condition cc = AL)
+        {
+            ASSERT(rt != rt2);
+            m_buffer.putInt(toARMWord(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+        }
+
+        void vmov_vfp32(int sn, int rt, Condition cc = AL)
+        {
+            ASSERT(rt <= 15);
+            emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_VFP32, rt << 1, sn, 0);
+        }
+
+        void vmov_arm32(int rt, int sn, Condition cc = AL)
+        {
+            ASSERT(rt <= 15);
+            emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_ARM32, rt << 1, sn, 0);
+        }
+
+        void vcvt_f64_s32(int dd, int sm, Condition cc = AL)
+        {
+            ASSERT(!(sm & 0x1)); // sm must be divisible by 2
+            emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
+        }
+
+        void vcvt_s32_f64(int sd, int dm, Condition cc = AL)
+        {
+            ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+            emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
+        }
+
+        void vcvt_u32_f64(int sd, int dm, Condition cc = AL)
+        {
+            ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+            emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
+        }
+
+        void vcvt_f64_f32(int dd, int sm, Condition cc = AL)
+        {
+            ASSERT(dd <= 15 && sm <= 15);
+            emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_F32, dd, 0, sm);
+        }
+
+        void vcvt_f32_f64(int dd, int sm, Condition cc = AL)
+        {
+            ASSERT(dd <= 15 && sm <= 15);
+            emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F32_F64, dd, 0, sm);
+        }
+
+        void vmrs_apsr(Condition cc = AL)
+        {
+            m_buffer.putInt(toARMWord(cc) | VMRS_APSR);
+        }
+
+        void clz(int rd, int rm, Condition cc = AL)
+        {
+            m_buffer.putInt(toARMWord(cc) | CLZ | RD(rd) | RM(rm));
+        }
+
+        void bkpt(ARMWord value)
+        {
+            m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+        }
+
+        void nop()
+        {
+            m_buffer.putInt(NOP);
+        }
+
+        static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+        {
+            UNUSED_PARAM(isCopyingToExecutableMemory);
+            RELEASE_ASSERT(!(size % sizeof(int32_t)));
+
+            int32_t* ptr = static_cast<int32_t*>(base);
+            const size_t num32s = size / sizeof(int32_t);
+            const int32_t insn = NOP;
+            for (size_t i = 0; i < num32s; i++)
+                *ptr++ = insn;
+        }
+
+        void dmbSY()
+        {
+            m_buffer.putInt(DMB_SY);
+        }
+
+        void dmbISHST()
+        {
+            m_buffer.putInt(DMB_ISHST);
+        }
+
+        void bx(int rm, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
+        }
+
+        AssemblerLabel blx(int rm, Condition cc = AL)
+        {
+            emitInstruction(toARMWord(cc) | BLX, 0, 0, RM(rm));
+            return m_buffer.label();
+        }
+
+        static ARMWord lsl(int reg, ARMWord value)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(value <= 0x1f);
+            return reg | (value << 7) | 0x00;
+        }
+
+        static ARMWord lsr(int reg, ARMWord value)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(value <= 0x1f);
+            return reg | (value << 7) | 0x20;
+        }
+
+        static ARMWord asr(int reg, ARMWord value)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(value <= 0x1f);
+            return reg | (value << 7) | 0x40;
+        }
+
+        static ARMWord lslRegister(int reg, int shiftReg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(shiftReg <= ARMRegisters::pc);
+            return reg | (shiftReg << 8) | 0x10;
+        }
+
+        static ARMWord lsrRegister(int reg, int shiftReg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(shiftReg <= ARMRegisters::pc);
+            return reg | (shiftReg << 8) | 0x30;
+        }
+
+        static ARMWord asrRegister(int reg, int shiftReg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(shiftReg <= ARMRegisters::pc);
+            return reg | (shiftReg << 8) | 0x50;
+        }
+
+        // General helpers
+
+        size_t codeSize() const
+        {
+            return m_buffer.codeSize();
+        }
+
+        void ensureSpace(int insnSpace, int constSpace)
+        {
+            m_buffer.ensureSpace(insnSpace, constSpace);
+        }
+
+        int sizeOfConstantPool()
+        {
+            return m_buffer.sizeOfConstantPool();
+        }
+
+        AssemblerLabel labelIgnoringWatchpoints()
+        {
+            m_buffer.ensureSpaceForAnyInstruction();
+            return m_buffer.label();
+        }
+
+        AssemblerLabel labelForWatchpoint()
+        {
+            m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
+            AssemblerLabel result = m_buffer.label();
+            if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
+                result = label();
+            m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+            return label();
+        }
+
+        AssemblerLabel label()
+        {
+            AssemblerLabel result = labelIgnoringWatchpoints();
+            while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
+                nop();
+                // The required number of instruction slots is ensured by labelForWatchpoint.
+                result = m_buffer.label();
+            }
+            return result;
+        }
+
+        AssemblerLabel align(int alignment)
+        {
+            while (!m_buffer.isAligned(alignment))
+                mov(ARMRegisters::r0, ARMRegisters::r0);
+
+            return label();
+        }
+
+        AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
+        {
+            ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+            m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
+            ldrUniqueImmediate(rd, InvalidBranchTarget, cc);
+            return m_buffer.label();
+        }
+
+        AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
+        {
+            return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+        }
+
+        void prepareExecutableCopy(void* to);
+
+        unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+        // DFG assembly helpers for moving data between fp and registers.
+        void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+        {
+            vmov_arm64(rd1, rd2, rn);
+        }
+
+        void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+        {
+            vmov_vfp64(rd, rn1, rn2);
+        }
+
+        // Patching helpers
+
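+        // Returns the address of the 32-bit literal referenced by an "ldr rX, [pc, #+/-imm]".
+        // For a blx call site the load is the instruction immediately before the call.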
+        static ARMWord* getLdrImmAddress(ARMWord* insn)
+        {
+            // Check for call
+            if ((*insn & LdrPcImmediateInstructionMask) != LdrPcImmediateInstruction) {
+                // Must be BLX
+                ASSERT((*insn & BlxInstructionMask) == BlxInstruction);
+                insn--;
+            }
+
+            // Must be an ldr ..., [pc +/- imm]
+            ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+            ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetchOffset * sizeof(ARMWord);
+            if (*insn & DataTransferUp)
+                return reinterpret_cast<ARMWord*>(addr + (*insn & DataTransferOffsetMask));
+            return reinterpret_cast<ARMWord*>(addr - (*insn & DataTransferOffsetMask));
+        }
+
+        static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+        {
+            // Must be an ldr ..., [pc +/- imm]
+            ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+            if (*insn & 0x1)
+                return reinterpret_cast<ARMWord*>(constPool + ((*insn & DataTransferOffsetMask) >> 1));
+            return getLdrImmAddress(insn);
+        }
+
+        static void patchPointerInternal(intptr_t from, void* to)
+        {
+            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+            ARMWord* addr = getLdrImmAddress(insn);
+            *addr = reinterpret_cast<ARMWord>(to);
+        }
+
+        static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+        {
+            value = (value << 1) + 1;
+            ASSERT(!(value & ~DataTransferOffsetMask));
+            return (load & ~DataTransferOffsetMask) | value;
+        }
+
+        static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+        // Read pointers
+        static void* readPointer(void* from)
+        {
+            ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
+            ARMWord* address = getLdrImmAddress(instruction);
+            return *reinterpret_cast<void**>(address);
+        }
+
+        // Patch pointers
+
+        static void linkPointer(void* code, AssemblerLabel from, void* to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+        }
+
+        static void repatchInt32(void* where, int32_t to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
+        }
+
+        static void repatchCompact(void* where, int32_t value)
+        {
+            ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
+            ASSERT((*instruction & 0x0f700000) == LoadUint32);
+            if (value >= 0)
+                *instruction = (*instruction & 0xff7ff000) | DataTransferUp | value;
+            else
+                *instruction = (*instruction & 0xff7ff000) | -value;
+            cacheFlush(instruction, sizeof(ARMWord));
+        }
+
+        static void repatchPointer(void* from, void* to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+        }
+
+        // Linkers
+        static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
+        {
+            return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
+        }
+
+        void linkJump(AssemblerLabel from, AssemblerLabel to)
+        {
+            ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
+            ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+            *addr = toARMWord(to.m_offset);
+        }
+
+        static void linkJump(void* code, AssemblerLabel from, void* to)
+        {
+            patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+        }
+
+        static void relinkJump(void* from, void* to)
+        {
+            patchPointerInternal(getAbsoluteJumpAddress(from), to);
+        }
+
+        static void relinkJumpToNop(void* from)
+        {
+            relinkJump(from, from);
+        }
+
+        static void linkCall(void* code, AssemblerLabel from, void* to)
+        {
+            patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+        }
+
+        static void relinkCall(void* from, void* to)
+        {
+            patchPointerInternal(getAbsoluteJumpAddress(from), to);
+        }
+
+        static void* readCallTarget(void* from)
+        {
+            return readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from)));
+        }
+
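+        // If the target fits in the 24-bit branch range the instruction becomes a direct B;
+        // otherwise it is replaced by "ldr pc, [pc, #-4]" with the absolute target stored in
+        // the following word.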
+        static void replaceWithJump(void* instructionStart, void* to)
+        {
+            ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+            intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetchOffset * sizeof(ARMWord));
+
+            if (!(difference & 1)) {
+                difference >>= 2;
+                if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+                     // Direct branch.
+                     instruction[0] = B | AL | (difference & BranchOffsetMask);
+                     cacheFlush(instruction, sizeof(ARMWord));
+                     return;
+                }
+            }
+
+            // Load target.
+            instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
+            instruction[1] = reinterpret_cast<ARMWord>(to);
+            cacheFlush(instruction, sizeof(ARMWord) * 2);
+        }
+
+        static ptrdiff_t maxJumpReplacementSize()
+        {
+            return sizeof(ARMWord) * 2;
+        }
+
+        static constexpr ptrdiff_t patchableJumpSize()
+        {
+            return sizeof(ARMWord) * 3;
+        }
+
+        static void replaceWithLoad(void* instructionStart)
+        {
+            ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+            cacheFlush(instruction, sizeof(ARMWord));
+
+            ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+            if ((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction) {
+                 *instruction = (*instruction & ~LdrOrAddInstructionMask) | LdrImmediateInstruction;
+                 cacheFlush(instruction, sizeof(ARMWord));
+            }
+        }
+
+        static void replaceWithAddressComputation(void* instructionStart)
+        {
+            ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+            cacheFlush(instruction, sizeof(ARMWord));
+
+            ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+            if ((*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction) {
+                 *instruction = (*instruction & ~LdrOrAddInstructionMask) | AddImmediateInstruction;
+                 cacheFlush(instruction, sizeof(ARMWord));
+            }
+        }
+
+        static void revertBranchPtrWithPatch(void* instructionStart, RegisterID rn, ARMWord imm)
+        {
+            ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+
+            ASSERT((instruction[2] & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+            instruction[0] = toARMWord(AL) | ((instruction[2] & 0x0fff0fff) + sizeof(ARMWord)) | RD(ARMRegisters::S1);
+            *getLdrImmAddress(instruction) = imm;
+            instruction[1] = toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(ARMRegisters::S1);
+            cacheFlush(instruction, 2 * sizeof(ARMWord));
+        }
+
+        // Address operations
+
+        static void* getRelocatedAddress(void* code, AssemblerLabel label)
+        {
+            return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+        }
+
+        // Address differences
+
+        static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+        {
+            return b.m_offset - a.m_offset;
+        }
+
+        static unsigned getCallReturnOffset(AssemblerLabel call)
+        {
+            return call.m_offset;
+        }
+
+        // Handle immediates
+
+        static ARMWord getOp2(ARMWord imm);
+
+        // Fast case if imm is known to be between 0 and 0xff
+        static ARMWord getOp2Byte(ARMWord imm)
+        {
+            ASSERT(imm <= 0xff);
+            return Op2Immediate | imm;
+        }
+
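+        // Halfword transfers split the 8-bit immediate offset across bits 0-3 and 8-11.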
+        static ARMWord getOp2Half(ARMWord imm)
+        {
+            ASSERT(imm <= 0xff);
+            return ImmediateForHalfWordTransfer | (imm & 0x0f) | ((imm & 0xf0) << 4);
+        }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
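+        // movw/movt take a 16-bit immediate split as imm4:imm12, so the top nibble of the
+        // value moves up to bits 16-19 of the instruction.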
+        static ARMWord getImm16Op2(ARMWord imm)
+        {
+            if (imm <= 0xffff)
+                return (imm & 0xf000) << 4 | (imm & 0xfff);
+            return InvalidImmediate;
+        }
+#endif
+        ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+        void moveImm(ARMWord imm, int dest);
+        ARMWord encodeComplexImm(ARMWord imm, int dest);
+
+        // Memory load/store helpers
+
+        void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
+        void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+        void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
+        void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+        void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
+        void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+
+        // Constant pool handlers
+
+        static ARMWord placeConstantPoolBarrier(int offset)
+        {
+            offset = (offset - sizeof(ARMWord)) >> 2;
+            ASSERT((offset <= MaximumBranchOffsetDistance && offset >= MinimumBranchOffsetDistance));
+            return AL | B | (offset & BranchOffsetMask);
+        }
+
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+        static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+        {
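+            // Invokes the ARM-private cacheflush syscall (__ARM_NR_cacheflush, 0xf0002) for [begin, end).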
+            asm volatile(
+                "push    {r7}\n"
+                "mov     r0, %0\n"
+                "mov     r1, %1\n"
+                "mov     r7, #0xf0000\n"
+                "add     r7, r7, #0x2\n"
+                "mov     r2, #0x0\n"
+                "svc     0x0\n"
+                "pop     {r7}\n"
+                :
+                : "r" (begin), "r" (end)
+                : "r0", "r1", "r2");
+        }
+#endif
+
+        static void cacheFlush(void* code, size_t size)
+        {
+#if OS(LINUX) && COMPILER(GCC_OR_CLANG)
+            size_t page = pageSize();
+            uintptr_t current = reinterpret_cast<uintptr_t>(code);
+            uintptr_t end = current + size;
+            uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+            if (end <= firstPageEnd) {
+                linuxPageFlush(current, end);
+                return;
+            }
+
+            linuxPageFlush(current, firstPageEnd);
+
+            for (current = firstPageEnd; current + page < end; current += page)
+                linuxPageFlush(current, current + page);
+
+            linuxPageFlush(current, end);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+        }
+
+    private:
+        static ARMWord RM(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg;
+        }
+
+        static ARMWord RS(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg << 8;
+        }
+
+        static ARMWord RD(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg << 12;
+        }
+
+        static ARMWord RN(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg << 16;
+        }
+
+        static ARMWord getConditionalField(ARMWord i)
+        {
+            return i & ConditionalFieldMask;
+        }
+
+        static ARMWord toARMWord(Condition cc)
+        {
+            return static_cast<ARMWord>(cc);
+        }
+
+        static ARMWord toARMWord(uint32_t u)
+        {
+            return static_cast<ARMWord>(u);
+        }
+
+        int genInt(int reg, ARMWord imm, bool positive);
+
+        ARMBuffer m_buffer;
+        Jumps m_jumps;
+        uint32_t m_indexOfTailOfLastWatchpoint;
+    };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/assembler/ARMv7Assembler.h b/assembler/ARMv7Assembler.h
new file mode 100644
index 0000000..86218ea
--- /dev/null
+++ b/assembler/ARMv7Assembler.h
@@ -0,0 +1,2962 @@
+/*
+ * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
+#include <limits.h>
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+
+    #define FOR_EACH_CPU_REGISTER(V) \
+        FOR_EACH_CPU_GPREGISTER(V) \
+        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        FOR_EACH_CPU_FPREGISTER(V)
+
+    // Each register is declared as a pair of:
+    // 1. the storage type needed by the JIT probe to save the register value, and
+    // 2. the name of the register.
+    #define FOR_EACH_CPU_GPREGISTER(V) \
+        V(void*, r0) \
+        V(void*, r1) \
+        V(void*, r2) \
+        V(void*, r3) \
+        V(void*, r4) \
+        V(void*, r5) \
+        V(void*, r6) \
+        V(void*, r7) \
+        V(void*, r8) \
+        V(void*, r9) \
+        V(void*, r10) \
+        V(void*, r11) \
+        V(void*, ip) \
+        V(void*, sp) \
+        V(void*, lr) \
+        V(void*, pc)
+
+    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+        V(void*, apsr) \
+        V(void*, fpscr) \
+
+    #define FOR_EACH_CPU_FPREGISTER(V) \
+        V(double, d0) \
+        V(double, d1) \
+        V(double, d2) \
+        V(double, d3) \
+        V(double, d4) \
+        V(double, d5) \
+        V(double, d6) \
+        V(double, d7) \
+        V(double, d8) \
+        V(double, d9) \
+        V(double, d10) \
+        V(double, d11) \
+        V(double, d12) \
+        V(double, d13) \
+        V(double, d14) \
+        V(double, d15) \
+        V(double, d16) \
+        V(double, d17) \
+        V(double, d18) \
+        V(double, d19) \
+        V(double, d20) \
+        V(double, d21) \
+        V(double, d22) \
+        V(double, d23) \
+        V(double, d24) \
+        V(double, d25) \
+        V(double, d26) \
+        V(double, d27) \
+        V(double, d28) \
+        V(double, d29) \
+        V(double, d30) \
+        V(double, d31)
+
+    typedef enum {
+        #define DECLARE_REGISTER(_type, _regName) _regName,
+        FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+        #undef DECLARE_REGISTER
+
+        fp = r7,   // frame pointer
+        sb = r9,   // static base
+        sl = r10,  // stack limit
+        r12 = ip,
+        r13 = sp,
+        r14 = lr,
+        r15 = pc
+    } RegisterID;
+
+    typedef enum {
+        s0,
+        s1,
+        s2,
+        s3,
+        s4,
+        s5,
+        s6,
+        s7,
+        s8,
+        s9,
+        s10,
+        s11,
+        s12,
+        s13,
+        s14,
+        s15,
+        s16,
+        s17,
+        s18,
+        s19,
+        s20,
+        s21,
+        s22,
+        s23,
+        s24,
+        s25,
+        s26,
+        s27,
+        s28,
+        s29,
+        s30,
+        s31,
+    } FPSingleRegisterID;
+
+    typedef enum {
+        #define DECLARE_REGISTER(_type, _regName) _regName,
+        FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+        #undef DECLARE_REGISTER
+    } FPDoubleRegisterID;
+
+    typedef enum {
+        q0,
+        q1,
+        q2,
+        q3,
+        q4,
+        q5,
+        q6,
+        q7,
+        q8,
+        q9,
+        q10,
+        q11,
+        q12,
+        q13,
+        q14,
+        q15,
+        q16,
+        q17,
+        q18,
+        q19,
+        q20,
+        q21,
+        q22,
+        q23,
+        q24,
+        q25,
+        q26,
+        q27,
+        q28,
+        q29,
+        q30,
+        q31,
+    } FPQuadRegisterID;
+
+    inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
+    {
+        ASSERT(reg < d16);
+        return (FPSingleRegisterID)(reg << 1);
+    }
+
+    inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
+    {
+        ASSERT(!(reg & 1));
+        return (FPDoubleRegisterID)(reg >> 1);
+    }
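+
+    // Worked example of the aliasing these helpers assume: each double register dN overlaps
+    // the single-precision pair s(2N) and s(2N+1), so asSingle(d2) yields s4 and asDouble(s4)
+    // yields d2. d16-d31 have no single-precision aliases, hence the ASSERT(reg < d16) above.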
+
+} // namespace ARMRegisters
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+    friend class ARMv7Assembler;
+
+    typedef uint8_t ThumbImmediateType;
+    static const ThumbImmediateType TypeInvalid = 0;
+    static const ThumbImmediateType TypeEncoded = 1;
+    static const ThumbImmediateType TypeUInt16 = 2;
+
+    typedef union {
+        int16_t asInt;
+        struct {
+            unsigned imm8 : 8;
+            unsigned imm3 : 3;
+            unsigned i    : 1;
+            unsigned imm4 : 4;
+        };
+        // If this is an encoded immediate, then it may describe a shift, or a pattern.
+        struct {
+            unsigned shiftValue7 : 7;
+            unsigned shiftAmount : 5;
+        };
+        struct {
+            unsigned immediate   : 8;
+            unsigned pattern     : 4;
+        };
+    } ThumbImmediateValue;
+
+    // byte0 contains the least significant byte; not using an array to make client code endian agnostic.
+    typedef union {
+        int32_t asInt;
+        struct {
+            uint8_t byte0;
+            uint8_t byte1;
+            uint8_t byte2;
+            uint8_t byte3;
+        };
+    } PatternBytes;
+
+    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+    {
+        if (value & ~((1 << N) - 1)) /* check whether any of the top N bits (of 2N bits) are set */
+            value >>= N;             /* if any were set, lose the bottom N */
+        else                         /* if none of the top N bits are set, */
+            zeros += N;              /* then we have identified N leading zeros */
+    }
+
+    static int32_t countLeadingZeros(uint32_t value)
+    {
+        if (!value)
+            return 32;
+
+        int32_t zeros = 0;
+        countLeadingZerosPartial(value, zeros, 16);
+        countLeadingZerosPartial(value, zeros, 8);
+        countLeadingZerosPartial(value, zeros, 4);
+        countLeadingZerosPartial(value, zeros, 2);
+        countLeadingZerosPartial(value, zeros, 1);
+        return zeros;
+    }
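+
+    // Illustrative trace of the binary search above: for value == 0x00001000 (bit 12 set),
+    // the 16-bit step sees the top half clear and adds 16 zeros; the 8-bit and 4-bit steps
+    // each find a set bit and only shift the value down; the 2-bit and 1-bit steps add 2 and
+    // 1 more, returning 19, which matches a hardware CLZ of 0x00001000.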
+
+    ARMThumbImmediate()
+        : m_type(TypeInvalid)
+    {
+        m_value.asInt = 0;
+    }
+        
+    ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+        : m_type(type)
+        , m_value(value)
+    {
+    }
+
+    ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+        : m_type(TypeUInt16)
+    {
+        // Make sure this constructor is only reached with type TypeUInt16;
+        // this extra parameter makes the code a little clearer by making it
+        // explicit at call sites which type is being constructed
+        ASSERT_UNUSED(type, type == TypeUInt16);
+
+        m_value.asInt = value;
+    }
+
+public:
+    static ARMThumbImmediate makeEncodedImm(uint32_t value)
+    {
+        ThumbImmediateValue encoding;
+        encoding.asInt = 0;
+
+        // okay, these are easy.
+        if (value < 256) {
+            encoding.immediate = value;
+            encoding.pattern = 0;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        int32_t leadingZeros = countLeadingZeros(value);
+        // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+        ASSERT(leadingZeros < 24);
+
+        // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+        // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+        // zero.  count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+        int32_t rightShiftAmount = 24 - leadingZeros;
+        if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+            // Shift the value down to the low byte position.  The assign to 
+            // shiftValue7 drops the implicit top bit.
+            encoding.shiftValue7 = value >> rightShiftAmount;
+            // The encoded shift amount is the magnitude of a right rotate.
+            encoding.shiftAmount = 8 + leadingZeros;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+        
+        PatternBytes bytes;
+        bytes.asInt = value;
+
+        if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+            encoding.immediate = bytes.byte0;
+            encoding.pattern = 3;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+            encoding.immediate = bytes.byte0;
+            encoding.pattern = 1;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+            encoding.immediate = bytes.byte1;
+            encoding.pattern = 2;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        return ARMThumbImmediate();
+    }
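+
+    // Two illustrative encodings from the logic above:
+    //   makeEncodedImm(0x2A00): 18 leading zeros, so rightShiftAmount = 6; the value is the
+    //   byte 0xA8 shifted up, so shiftValue7 holds its low seven bits (0x28, top bit implicit)
+    //   and shiftAmount = 8 + 18 = 26, i.e. 0xA8 rotated right by 26.
+    //   makeEncodedImm(0x00120012): byte0 == byte2 and byte1 == byte3 == 0, so it matches the
+    //   0x00XY00XY form and encodes as immediate = 0x12 with pattern = 1.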
+
+    static ARMThumbImmediate makeUInt12(int32_t value)
+    {
+        return (!(value & 0xfffff000))
+            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+            : ARMThumbImmediate();
+    }
+
+    static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+    {
+        // If this is not a 12-bit unsigned int, try making an encoded immediate.
+        return (!(value & 0xfffff000))
+            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+            : makeEncodedImm(value);
+    }
+
+    // The 'make' methods, above, return a !isValid() value if the argument
+    // cannot be represented as the requested type. makeUInt16, below, always
+    // succeeds, since any uint16_t argument can be represented.
+    static ARMThumbImmediate makeUInt16(uint16_t value)
+    {
+        return ARMThumbImmediate(TypeUInt16, value);
+    }
+    
+    bool isValid()
+    {
+        return m_type != TypeInvalid;
+    }
+
+    uint16_t asUInt16() const { return m_value.asInt; }
+
+    // These methods rely on the format of encoded byte values.
+    bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+    bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+    bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+    bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+    bool isUInt7() { return !(m_value.asInt & 0xff80); }
+    bool isUInt8() { return !(m_value.asInt & 0xff00); }
+    bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+    bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+    bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+    bool isUInt16() { return m_type == TypeUInt16; }
+    uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+    uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+    uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+    uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+    uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+    uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+    uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+    uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+    uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+    uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+    bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+    ThumbImmediateType m_type;
+    ThumbImmediateValue m_value;
+};
+
+typedef enum {
+    SRType_LSL,
+    SRType_LSR,
+    SRType_ASR,
+    SRType_ROR,
+
+    SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ShiftTypeAndAmount {
+    friend class ARMv7Assembler;
+
+public:
+    ShiftTypeAndAmount()
+    {
+        m_u.type = (ARMShiftType)0;
+        m_u.amount = 0;
+    }
+    
+    ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+    {
+        m_u.type = type;
+        m_u.amount = amount & 31;
+    }
+    
+    unsigned lo4() { return m_u.lo4; }
+    unsigned hi4() { return m_u.hi4; }
+    
+private:
+    union {
+        struct {
+            unsigned lo4 : 4;
+            unsigned hi4 : 4;
+        };
+        struct {
+            unsigned type   : 2;
+            unsigned amount : 6;
+        };
+    } m_u;
+};
+
+class ARMv7Assembler {
+public:
+    typedef ARMRegisters::RegisterID RegisterID;
+    typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
+    typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
+    typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
+    typedef FPDoubleRegisterID FPRegisterID;
+    
+    static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
+    static constexpr RegisterID lastRegister() { return ARMRegisters::r13; }
+
+    static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+    static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+
+    // (HS, LO, HI, LS) -> (AE, B, A, BE)
+    // (VS, VC) -> (O, NO)
+    typedef enum {
+        ConditionEQ, // Zero / Equal.
+        ConditionNE, // Non-zero / Not equal.
+        ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
+        ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
+        ConditionMI, // Negative.
+        ConditionPL, // Positive or zero.
+        ConditionVS, // Overflowed.
+        ConditionVC, // Not overflowed.
+        ConditionHI, // Unsigned higher.
+        ConditionLS, // Unsigned lower or same.
+        ConditionGE, // Signed greater than or equal.
+        ConditionLT, // Signed less than.
+        ConditionGT, // Signed greater than.
+        ConditionLE, // Signed less than or equal.
+        ConditionAL, // Unconditional / Always execute.
+        ConditionInvalid
+    } Condition;
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 3) 
+    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0), 
+                    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
+                    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
+                    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
+                    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
+    };
+    enum JumpLinkType { 
+        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+        LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
+        LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
+        LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
+        LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
+        LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
+        LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
+        LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
+    };
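+
+    // For illustration, the enums above pack a byte count into the upper bits and an index
+    // into the low three: JumpCondition is (12 << 3) | 2, and JUMP_ENUM_SIZE(JumpCondition)
+    // recovers 12, the worst-case space (six 16-bit words) reserved for a conditional jump.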
+
+    class LinkRecord {
+    public:
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+        {
+            data.realTypes.m_from = from;
+            data.realTypes.m_to = to;
+            data.realTypes.m_type = type;
+            data.realTypes.m_linkType = LinkInvalid;
+            data.realTypes.m_condition = condition;
+        }
+        void operator=(const LinkRecord& other)
+        {
+            data.copyTypes.content[0] = other.data.copyTypes.content[0];
+            data.copyTypes.content[1] = other.data.copyTypes.content[1];
+            data.copyTypes.content[2] = other.data.copyTypes.content[2];
+        }
+        intptr_t from() const { return data.realTypes.m_from; }
+        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+        intptr_t to() const { return data.realTypes.m_to; }
+        JumpType type() const { return data.realTypes.m_type; }
+        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+        Condition condition() const { return data.realTypes.m_condition; }
+    private:
+        union {
+            struct RealTypes {
+                intptr_t m_from : 31;
+                intptr_t m_to : 31;
+                JumpType m_type : 8;
+                JumpLinkType m_linkType : 8;
+                Condition m_condition : 16;
+            } realTypes;
+            struct CopyTypes {
+                uint32_t content[3];
+            } copyTypes;
+            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+        } data;
+    };
+
+    ARMv7Assembler()
+        : m_indexOfLastWatchpoint(INT_MIN)
+        , m_indexOfTailOfLastWatchpoint(INT_MIN)
+    {
+    }
+
+    AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
+
+private:
+
+    // ARMv7, Appx-A.6.3
+    static bool BadReg(RegisterID reg)
+    {
+        return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+    }
+
+    uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
+    {
+        uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+        if (rdNum & 1)
+            rdMask |= 1 << lowBitShift;
+        return rdMask;
+    }
+
+    uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
+    {
+        uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+        if (rdNum & 16)
+            rdMask |= 1 << highBitShift;
+        return rdMask;
+    }
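+
+    // Worked example (for illustration only): VFP registers are encoded as a 4-bit field plus
+    // one extra high bit. doubleRegisterMask(d17, ...) places (17 & 0xf) == 1 at lowBitsShift
+    // and, since 17 & 16 is set, a 1 at highBitShift. singleRegisterMask splits the other way:
+    // the upper four bits of the register number form the field and the low bit is the extra bit.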
+
+    typedef enum {
+        OP_ADD_reg_T1       = 0x1800,
+        OP_SUB_reg_T1       = 0x1A00,
+        OP_ADD_imm_T1       = 0x1C00,
+        OP_SUB_imm_T1       = 0x1E00,
+        OP_MOV_imm_T1       = 0x2000,
+        OP_CMP_imm_T1       = 0x2800,
+        OP_ADD_imm_T2       = 0x3000,
+        OP_SUB_imm_T2       = 0x3800,
+        OP_AND_reg_T1       = 0x4000,
+        OP_EOR_reg_T1       = 0x4040,
+        OP_TST_reg_T1       = 0x4200,
+        OP_RSB_imm_T1       = 0x4240,
+        OP_CMP_reg_T1       = 0x4280,
+        OP_ORR_reg_T1       = 0x4300,
+        OP_MVN_reg_T1       = 0x43C0,
+        OP_ADD_reg_T2       = 0x4400,
+        OP_MOV_reg_T1       = 0x4600,
+        OP_BLX              = 0x4700,
+        OP_BX               = 0x4700,
+        OP_STR_reg_T1       = 0x5000,
+        OP_STRH_reg_T1      = 0x5200,
+        OP_STRB_reg_T1      = 0x5400,
+        OP_LDRSB_reg_T1     = 0x5600,
+        OP_LDR_reg_T1       = 0x5800,
+        OP_LDRH_reg_T1      = 0x5A00,
+        OP_LDRB_reg_T1      = 0x5C00,
+        OP_LDRSH_reg_T1     = 0x5E00,
+        OP_STR_imm_T1       = 0x6000,
+        OP_LDR_imm_T1       = 0x6800,
+        OP_STRB_imm_T1      = 0x7000,
+        OP_LDRB_imm_T1      = 0x7800,
+        OP_STRH_imm_T1      = 0x8000,
+        OP_LDRH_imm_T1      = 0x8800,
+        OP_STR_imm_T2       = 0x9000,
+        OP_LDR_imm_T2       = 0x9800,
+        OP_ADD_SP_imm_T1    = 0xA800,
+        OP_ADD_SP_imm_T2    = 0xB000,
+        OP_SUB_SP_imm_T1    = 0xB080,
+        OP_PUSH_T1          = 0xB400,
+        OP_POP_T1           = 0xBC00,
+        OP_BKPT             = 0xBE00,
+        OP_IT               = 0xBF00,
+        OP_NOP_T1           = 0xBF00,
+    } OpcodeID;
+
+    typedef enum {
+        OP_B_T1         = 0xD000,
+        OP_B_T2         = 0xE000,
+        OP_POP_T2       = 0xE8BD,
+        OP_PUSH_T2      = 0xE92D,
+        OP_AND_reg_T2   = 0xEA00,
+        OP_TST_reg_T2   = 0xEA10,
+        OP_ORR_reg_T2   = 0xEA40,
+        OP_ORR_S_reg_T2 = 0xEA50,
+        OP_ASR_imm_T1   = 0xEA4F,
+        OP_LSL_imm_T1   = 0xEA4F,
+        OP_LSR_imm_T1   = 0xEA4F,
+        OP_ROR_imm_T1   = 0xEA4F,
+        OP_MVN_reg_T2   = 0xEA6F,
+        OP_EOR_reg_T2   = 0xEA80,
+        OP_ADD_reg_T3   = 0xEB00,
+        OP_ADD_S_reg_T3 = 0xEB10,
+        OP_SUB_reg_T2   = 0xEBA0,
+        OP_SUB_S_reg_T2 = 0xEBB0,
+        OP_CMP_reg_T2   = 0xEBB0,
+        OP_VMOV_CtoD    = 0xEC00,
+        OP_VMOV_DtoC    = 0xEC10,
+        OP_FSTS         = 0xED00,
+        OP_VSTR         = 0xED00,
+        OP_FLDS         = 0xED10,
+        OP_VLDR         = 0xED10,
+        OP_VMOV_CtoS    = 0xEE00,
+        OP_VMOV_StoC    = 0xEE10,
+        OP_VMUL_T2      = 0xEE20,
+        OP_VADD_T2      = 0xEE30,
+        OP_VSUB_T2      = 0xEE30,
+        OP_VDIV         = 0xEE80,
+        OP_VABS_T2      = 0xEEB0,
+        OP_VCMP         = 0xEEB0,
+        OP_VCVT_FPIVFP  = 0xEEB0,
+        OP_VMOV_T2      = 0xEEB0,
+        OP_VMOV_IMM_T2  = 0xEEB0,
+        OP_VMRS         = 0xEEB0,
+        OP_VNEG_T2      = 0xEEB0,
+        OP_VSQRT_T1     = 0xEEB0,
+        OP_VCVTSD_T1    = 0xEEB0,
+        OP_VCVTDS_T1    = 0xEEB0,
+        OP_B_T3a        = 0xF000,
+        OP_B_T4a        = 0xF000,
+        OP_AND_imm_T1   = 0xF000,
+        OP_TST_imm      = 0xF010,
+        OP_ORR_imm_T1   = 0xF040,
+        OP_MOV_imm_T2   = 0xF040,
+        OP_MVN_imm      = 0xF060,
+        OP_EOR_imm_T1   = 0xF080,
+        OP_ADD_imm_T3   = 0xF100,
+        OP_ADD_S_imm_T3 = 0xF110,
+        OP_CMN_imm      = 0xF110,
+        OP_ADC_imm      = 0xF140,
+        OP_SUB_imm_T3   = 0xF1A0,
+        OP_SUB_S_imm_T3 = 0xF1B0,
+        OP_CMP_imm_T2   = 0xF1B0,
+        OP_RSB_imm_T2   = 0xF1C0,
+        OP_RSB_S_imm_T2 = 0xF1D0,
+        OP_ADD_imm_T4   = 0xF200,
+        OP_MOV_imm_T3   = 0xF240,
+        OP_SUB_imm_T4   = 0xF2A0,
+        OP_MOVT         = 0xF2C0,
+        OP_UBFX_T1      = 0xF3C0,
+        OP_NOP_T2a      = 0xF3AF,
+        OP_DMB_T1a      = 0xF3BF,
+        OP_STRB_imm_T3  = 0xF800,
+        OP_STRB_reg_T2  = 0xF800,
+        OP_LDRB_imm_T3  = 0xF810,
+        OP_LDRB_reg_T2  = 0xF810,
+        OP_STRH_imm_T3  = 0xF820,
+        OP_STRH_reg_T2  = 0xF820,
+        OP_LDRH_reg_T2  = 0xF830,
+        OP_LDRH_imm_T3  = 0xF830,
+        OP_STR_imm_T4   = 0xF840,
+        OP_STR_reg_T2   = 0xF840,
+        OP_LDR_imm_T4   = 0xF850,
+        OP_LDR_reg_T2   = 0xF850,
+        OP_STRB_imm_T2  = 0xF880,
+        OP_LDRB_imm_T2  = 0xF890,
+        OP_STRH_imm_T2  = 0xF8A0,
+        OP_LDRH_imm_T2  = 0xF8B0,
+        OP_STR_imm_T3   = 0xF8C0,
+        OP_LDR_imm_T3   = 0xF8D0,
+        OP_LDRSB_reg_T2 = 0xF910,
+        OP_LDRSH_reg_T2 = 0xF930,
+        OP_LSL_reg_T2   = 0xFA00,
+        OP_LSR_reg_T2   = 0xFA20,
+        OP_ASR_reg_T2   = 0xFA40,
+        OP_ROR_reg_T2   = 0xFA60,
+        OP_CLZ          = 0xFAB0,
+        OP_SMULL_T1     = 0xFB80,
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+        OP_SDIV_T1      = 0xFB90,
+        OP_UDIV_T1      = 0xFBB0,
+#endif
+    } OpcodeID1;
+
+    typedef enum {
+        OP_VADD_T2b      = 0x0A00,
+        OP_VDIVb         = 0x0A00,
+        OP_FLDSb         = 0x0A00,
+        OP_VLDRb         = 0x0A00,
+        OP_VMOV_IMM_T2b  = 0x0A00,
+        OP_VMOV_T2b      = 0x0A40,
+        OP_VMUL_T2b      = 0x0A00,
+        OP_FSTSb         = 0x0A00,
+        OP_VSTRb         = 0x0A00,
+        OP_VMOV_StoCb    = 0x0A10,
+        OP_VMOV_CtoSb    = 0x0A10,
+        OP_VMOV_DtoCb    = 0x0A10,
+        OP_VMOV_CtoDb    = 0x0A10,
+        OP_VMRSb         = 0x0A10,
+        OP_VABS_T2b      = 0x0A40,
+        OP_VCMPb         = 0x0A40,
+        OP_VCVT_FPIVFPb  = 0x0A40,
+        OP_VNEG_T2b      = 0x0A40,
+        OP_VSUB_T2b      = 0x0A40,
+        OP_VSQRT_T1b     = 0x0A40,
+        OP_VCVTSD_T1b    = 0x0A40,
+        OP_VCVTDS_T1b    = 0x0A40,
+        OP_NOP_T2b       = 0x8000,
+        OP_DMB_SY_T1b    = 0x8F5F,
+        OP_DMB_ISHST_T1b = 0x8F5A,
+        OP_B_T3b         = 0x8000,
+        OP_B_T4b         = 0x9000,
+    } OpcodeID2;
+
+    struct FourFours {
+        FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+        {
+            m_u.f0 = f0;
+            m_u.f1 = f1;
+            m_u.f2 = f2;
+            m_u.f3 = f3;
+        }
+
+        union {
+            unsigned value;
+            struct {
+                unsigned f0 : 4;
+                unsigned f1 : 4;
+                unsigned f2 : 4;
+                unsigned f3 : 4;
+            };
+        } m_u;
+    };
+
+    class ARMInstructionFormatter;
+
+    // false means else!
+    static bool ifThenElseConditionBit(Condition condition, bool isIf)
+    {
+        return isIf ? (condition & 1) : !(condition & 1);
+    }
+    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+    {
+        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+            | (ifThenElseConditionBit(condition, inst3if) << 2)
+            | (ifThenElseConditionBit(condition, inst4if) << 1)
+            | 1;
+        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
+    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+    {
+        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+            | (ifThenElseConditionBit(condition, inst3if) << 2)
+            | 2;
+        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
+    static uint8_t ifThenElse(Condition condition, bool inst2if)
+    {
+        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+            | 4;
+        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
+
+    static uint8_t ifThenElse(Condition condition)
+    {
+        int mask = 8;
+        return (condition << 4) | mask;
+    }
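+
+    // Worked example of the IT helpers above (values for illustration): an ITT NE block is
+    // it(ConditionNE, true), which computes mask = (1 << 3) | 4 = 0b1100 and returns
+    // (ConditionNE << 4) | mask = 0x1C; the first condition sits in the high nibble and the
+    // mask bit records that the second instruction is a "then" rather than an "else".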
+
+public:
+    
+    void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
+    }
+
+    void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+
+        if (rn == ARMRegisters::sp && imm.isUInt16()) {
+            ASSERT(!(imm.getUInt16() & 3));
+            if (!(rd & 8) && imm.isUInt10()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
+                return;
+            } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+                m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
+                return;
+            }
+        } else if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        if (imm.isEncodedImm())
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+        else {
+            ASSERT(imm.isUInt12());
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+        }
+    }
+
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // NOTE: In an IT block, add doesn't modify the flags register.
+    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (rd == ARMRegisters::sp) {
+            mov(rd, rn);
+            rn = rd;
+        }
+
+        if (rd == rn)
+            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+        else if (rd == rm)
+            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+        else if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+        else
+            add(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    // Not allowed in an IT (if then) block.
+    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+    }
+
+    // Not allowed in an IT (if then) block?
+    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // Not allowed in an IT (if then) block.
+    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+        else
+            add_S(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+    }
+
+    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+        else
+            ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+    
+    // Only allowed in IT (if then) block if last instruction.
+    ALWAYS_INLINE AssemblerLabel b()
+    {
+        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+        return m_formatter.label();
+    }
+    
+    // Only allowed in IT (if then) block if last instruction.
+    ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
+    {
+        ASSERT(rm != ARMRegisters::pc);
+        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+        return m_formatter.label();
+    }
+
+    // Only allowed in IT (if then) block if last instruction.
+    ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
+    {
+        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+        return m_formatter.label();
+    }
+
+    void bkpt(uint8_t imm = 0)
+    {
+        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+    }
+
+    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
+    }
+
+    ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+    }
+
+    ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        if (!(rn & 8) && imm.isUInt8())
+            m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+        else
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+    }
+
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+    {
+        if ((rn | rm) & 8)
+            cmp(rn, rm, ShiftTypeAndAmount());
+        else
+            m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+    }
+
+    // xor is not spelled with an 'e'. :-(
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+    }
+
+    // xor is not spelled with an 'e'. :-(
+    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // xor is not spelled with an 'e'. :-(
+    void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+        else
+            eor(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void it(Condition cond)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+    }
+
+    ALWAYS_INLINE void it(Condition cond, bool inst2if)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+    }
+
+    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+    }
+
+    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt7())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+            m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+    }
+    
+    ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
+    }
+
+    ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(imm.isUInt7());
+        ASSERT(!((rt | rn) & 8));
+        m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then it is a post-index load.
+    //
+    // If wback is set rn is updated - this is a pre or post index load,
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // MEM[index ? _tmp : _reg] = REG[rt]
+    // if (wback) REG[rn] = _tmp
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+    }
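+
+    // For example (illustrative values): ldr(r0, r1, -4, true, true) is a pre-indexed load with
+    // writeback; the negative offset clears the add bit (bit 9) and leaves offset = 4, then
+    // wback (bit 8), index (bit 10) and the constant bit 11 are ORed in before emitting the
+    // LDR (immediate) T4 form.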
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(imm.isUInt12());
+        ASSERT(!(imm.getUInt12() & 1));
+
+        if (!((rt | rn) & 8) && imm.isUInt6())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then it is a post-index load.
+    //
+    // If wback is set rn is updated - this is a pre or post index load,
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // MEM[index ? _tmp : _reg] = REG[rt]
+    // if (wback) REG[rn] = _tmp
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+    }
+
+    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(!BadReg(rt));   // Memory hint
+        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt5())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
+    }
+
+    void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+
+        ASSERT(!(offset & ~0xff));
+
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
+    }
+
+    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+    
+    void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+        
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+        
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isValid());
+        ASSERT(!imm.isEncodedImm());
+        ASSERT(!BadReg(rd));
+        
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+    }
+    
+#if OS(LINUX)
+    static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
+    {
+        uint16_t* address = static_cast<uint16_t*>(instructionStart);
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
+        uint16_t instruction[] = {
+            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16),
+            twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16),
+            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16),
+            twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16),
+            static_cast<uint16_t>(OP_CMP_reg_T2 | left)
+        };
+        performJITMemcpy(address, instruction, sizeof(uint16_t) * 5);
+        cacheFlush(address, sizeof(uint16_t) * 5);
+    }
+#else
+    static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isValid());
+        ASSERT(!imm.isEncodedImm());
+        ASSERT(!BadReg(rd));
+        
+        uint16_t* address = static_cast<uint16_t*>(instructionStart);
+        uint16_t instruction[] = {
+            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm),
+            twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm)
+        };
+        performJITMemcpy(address, instruction, sizeof(uint16_t) * 2);
+        cacheFlush(address, sizeof(uint16_t) * 2);
+    }
+#endif
+
+    ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isValid());
+        ASSERT(!BadReg(rd));
+        
+        if ((rd < 8) && imm.isUInt8())
+            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+        else if (imm.isEncodedImm())
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+        else
+            movT3(rd, imm);
+    }
+
+    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+    {
+        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+    }
+
+    ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isUInt16());
+        ASSERT(!BadReg(rd));
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+    }
+
+    ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isEncodedImm());
+        ASSERT(!BadReg(rd));
+        
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+    }
+
+    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+    {
+        if (!((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+        else
+            mvn(rd, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+    {
+        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+        sub(rd, zero, rm);
+    }
+
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+    }
+
+    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+        else
+            orr(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+        else
+            orr_S(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    ALWAYS_INLINE void pop(RegisterID dest)
+    {
+        if (dest < ARMRegisters::r8)
+            m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
+        else {
+            // Load postindexed with writeback.
+            ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+        }
+    }
+
+    ALWAYS_INLINE void pop(uint32_t registerList)
+    {
+        ASSERT(WTF::bitCount(registerList) > 1);
+        ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
+        ASSERT(!((1 << ARMRegisters::sp) & registerList));
+        m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
+    }
+
+    ALWAYS_INLINE void push(RegisterID src)
+    {
+        if (src < ARMRegisters::r8)
+            m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
+        else if (src == ARMRegisters::lr)
+            m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
+        else {
+            // Store preindexed with writeback.
+            str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+        }
+    }
+
+    ALWAYS_INLINE void push(uint32_t registerList)
+    {
+        ASSERT(WTF::bitCount(registerList) > 1);
+        ASSERT(!((1 << ARMRegisters::pc) & registerList));
+        ASSERT(!((1 << ARMRegisters::sp) & registerList));
+        m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
+    }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+    template<int datasize>
+    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");        
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+    }
+#endif
+
+    ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rdLo));
+        ASSERT(!BadReg(rdHi));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        ASSERT(rdLo != rdHi);
+        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt7())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then it is a post-index store.
+    //
+    // If wback is set rn is updated - this is a pre or post index store,
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // MEM[index ? _tmp : _reg] = REG[rt]
+    // if (wback) REG[rn] = _tmp
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt7())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then it is a post-index store.
+    //
+    // If wback is set rn is updated - this is a pre or post index store,
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // MEM[index ? _tmp : _reg] = REG[rt]
+    // if (wback) REG[rn] = _tmp
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+    
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isUInt12());
+        
+        if (!((rt | rn) & 8) && imm.isUInt6())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
+    }
+    
+    // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then it is a post-index store.
+    //
+    // If wback is set rn is updated - this is a pre or post index store,
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // MEM[index ? _tmp : _reg] = REG[rt]
+    // if (wback) REG[rn] = _tmp
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+        
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT(!(offset & ~0xff));
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
+    }
+    
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+        
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+
+        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+            ASSERT(!(imm.getUInt16() & 3));
+            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+            return;
+        } else if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        if (imm.isEncodedImm())
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+        else {
+            ASSERT(imm.isUInt12());
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+        }
+    }
+
+    ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+    {
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+        ASSERT(imm.isUInt12());
+
+        if (!((rd | rn) & 8) && !imm.getUInt12())
+            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
+        else
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
+    }
+
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // NOTE: In an IT block, sub doesn't modify the flags register.
+    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+        else
+            sub(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    // Not allowed in an IT (if then) block.
+    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+
+        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+            ASSERT(!(imm.getUInt16() & 3));
+            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+            return;
+        } else if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+    }
+
+    ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+    {
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+        ASSERT(imm.isUInt12());
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
+    }
+
+    // Not allowed in an IT (if then) block?
+    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // Not allowed in an IT (if then) block.
+    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+        else
+            sub_S(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+    }
+
+    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+    }
+
+    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+    {
+        if ((rn | rm) & 8)
+            tst(rn, rm, ShiftTypeAndAmount());
+        else
+            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+    }
+
+    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
+    {
+        ASSERT(lsb < 32);
+        ASSERT((width >= 1) && (width <= 32));
+        ASSERT((lsb + width) <= 32);
+        m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
+    }
+
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+    }
+#endif
+
+    void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
+    }
+
+    void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+    }
+
+    void vcmpz(FPDoubleRegisterID rd)
+    {
+        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
+    }
+
+    void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+    {
+        // boolean values are 64bit (toInt, unsigned, roundZero)
+        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
+    }
+
+    void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        // boolean values are 64bit (toInt, unsigned, roundZero)
+        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
+    }
+    
+    void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        // boolean values are 64bit (toInt, unsigned, roundZero)
+        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
+    }
+
+    void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
+    }
+
+    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+    {
+        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
+    }
+    
+    void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+    {
+        m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
+    }
+
+    void vmov(RegisterID rd, FPSingleRegisterID rn)
+    {
+        ASSERT(!BadReg(rd));
+        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
+    }
+
+    void vmov(FPSingleRegisterID rd, RegisterID rn)
+    {
+        ASSERT(!BadReg(rn));
+        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
+    }
+
+    void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
+    {
+        ASSERT(!BadReg(rd1));
+        ASSERT(!BadReg(rd2));
+        m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
+    }
+
+    void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
+    {
+        ASSERT(!BadReg(rn1));
+        ASSERT(!BadReg(rn2));
+        m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
+    }
+
+    void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
+    {
+        m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
+    }
+
+    void vmrs(RegisterID reg = ARMRegisters::pc)
+    {
+        ASSERT(reg != ARMRegisters::sp);
+        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
+    }
+
+    void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
+    }
+
+    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+    {
+        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
+    }
+
+    void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+    {
+        m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
+    }
+
+    void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
+    }
+
+    void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
+    }
+
+    void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
+    }
+
+    void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
+    }
+    
+    void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
+    }
+
+    void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+    {
+        m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
+    }
+
+    void nop()
+    {
+        m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
+    }
+
+    void nopw()
+    {
+        m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
+    }
+    
+    static constexpr int16_t nopPseudo16()
+    {
+        return OP_NOP_T1;
+    }
+
+    static constexpr int32_t nopPseudo32()
+    {
+        return OP_NOP_T2a | (OP_NOP_T2b << 16);
+    }
+
+    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+    {
+        RELEASE_ASSERT(!(size % sizeof(int16_t)));
+
+        char* ptr = static_cast<char*>(base);
+        const size_t num32s = size / sizeof(int32_t);
+        for (size_t i = 0; i < num32s; i++) {
+            const int32_t insn = nopPseudo32();
+            if (isCopyingToExecutableMemory)
+                performJITMemcpy(ptr, &insn, sizeof(int32_t));
+            else
+                memcpy(ptr, &insn, sizeof(int32_t));
+            ptr += sizeof(int32_t);
+        }
+
+        const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t);
+        ASSERT(num16s == 0 || num16s == 1);
+        ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size);
+        if (num16s) {
+            const int16_t insn = nopPseudo16();
+            if (isCopyingToExecutableMemory)
+                performJITMemcpy(ptr, &insn, sizeof(int16_t));
+            else
+                memcpy(ptr, &insn, sizeof(int16_t));
+        }
+    }
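+
+    // For example, a 6-byte fill emits one 4-byte NOP followed by one 2-byte NOP; the requested
+    // size must always be a multiple of 2 bytes (release-asserted above).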
+
+    void dmbSY()
+    {
+        m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b);
+    }
+
+    void dmbISHST()
+    {
+        m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_ISHST_T1b);
+    }
+
+    AssemblerLabel labelIgnoringWatchpoints()
+    {
+        return m_formatter.label();
+    }
+
+    AssemblerLabel labelForWatchpoint()
+    {
+        AssemblerLabel result = m_formatter.label();
+        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+            result = label();
+        m_indexOfLastWatchpoint = result.m_offset;
+        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+        return result;
+    }
+
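+    // label() never returns an offset that lies inside the region reserved for the last
+    // watchpoint: while the current offset is still before that watchpoint's tail it pads
+    // with a 4-byte NOP when at least 4 bytes remain, or a 2-byte NOP otherwise, so a later
+    // jump replacement planted at the watchpoint cannot clobber code that a label refers to.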
+    AssemblerLabel label()
+    {
+        AssemblerLabel result = m_formatter.label();
+        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+            if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
+                nopw();
+            else
+                nop();
+            result = m_formatter.label();
+        }
+        return result;
+    }
+    
+    AssemblerLabel align(int alignment)
+    {
+        while (!m_formatter.isAligned(alignment))
+            bkpt();
+
+        return label();
+    }
+    
+    static void* getRelocatedAddress(void* code, AssemblerLabel label)
+    {
+        ASSERT(label.isSet());
+        return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+    }
+    
+    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+    {
+        return b.m_offset - a.m_offset;
+    }
+
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+    
+    // Assembler admin methods:
+
+    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+    {
+        return a.from() < b.from();
+    }
+
+    static bool canCompact(JumpType jumpType)
+    {
+        // The following cannot be compacted:
+        //   JumpFixed: represents custom jump sequence
+        //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+        //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+    }
+    
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+    {
+        if (jumpType == JumpFixed)
+            return LinkInvalid;
+        
+        // for patchable jump we must leave space for the longest code sequence
+        if (jumpType == JumpNoConditionFixedSize)
+            return LinkBX;
+        if (jumpType == JumpConditionFixedSize)
+            return LinkConditionalBX;
+        
+        const int paddingSize = JUMP_ENUM_SIZE(jumpType);
+        
+        if (jumpType == JumpCondition) {
+            // 2-byte conditional T1
+            const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
+            if (canBeJumpT1(jumpT1Location, to))
+                return LinkJumpT1;
+            // 4-byte conditional T3
+            const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
+            if (canBeJumpT3(jumpT3Location, to))
+                return LinkJumpT3;
+            // 4-byte conditional T4 with IT
+            const uint16_t* conditionalJumpT4Location =
+                reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
+            if (canBeJumpT4(conditionalJumpT4Location, to))
+                return LinkConditionalJumpT4;
+        } else {
+            // 2-byte unconditional T2
+            const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
+            if (canBeJumpT2(jumpT2Location, to))
+                return LinkJumpT2;
+            // 4-byte unconditional T4
+            const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
+            if (canBeJumpT4(jumpT4Location, to))
+                return LinkJumpT4;
+            // use long jump sequence
+            return LinkBX;
+        }
+        
+        ASSERT(jumpType == JumpCondition);
+        return LinkConditionalBX;
+    }
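+    // As a rough sketch of the selection above: a conditional jump whose displacement fits the
+    // T1 encoding compacts to a single 2-byte instruction; one that only fits the T3 range takes
+    // 4 bytes; beyond that it becomes an IT-prefixed B.W (LinkConditionalJumpT4) and, as a last
+    // resort, the full MOVW/MOVT/BX sequence (LinkConditionalBX). Unconditional jumps follow the
+    // same pattern with the T2/T4/BX forms.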
+    
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+    {
+        JumpLinkType linkType = computeJumpType(record.type(), from, to);
+        record.setLinkType(linkType);
+        return linkType;
+    }
+    
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+    {
+        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+        return m_jumpsToLink;
+    }
+
+    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
+    {
+        const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8);
+        switch (record.linkType()) {
+        case LinkJumpT1:
+            linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        case LinkJumpT2:
+            linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        case LinkJumpT3:
+            linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        case LinkJumpT4:
+            linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        case LinkConditionalJumpT4:
+            linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        case LinkConditionalBX:
+            linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        case LinkBX:
+            linkBX(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+
+    void* unlinkedCode() { return m_formatter.data(); }
+    size_t codeSize() const { return m_formatter.codeSize(); }
+
+    static unsigned getCallReturnOffset(AssemblerLabel call)
+    {
+        ASSERT(call.isSet());
+        return call.m_offset;
+    }
+
+    // Linking & patching:
+    //
+    // 'link' and 'patch' methods are for use on unprotected code - such as the code
+    // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
+    // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+    // pool the 'repatch' and 'relink' methods should be used.
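+    //
+    // Roughly, the life cycle looks like this: jumps recorded via linkJump(from, to, type, condition)
+    // during code generation are resolved in bulk through jumpsToLink() and link() when the code is
+    // copied out of the AssemblerBuffer; after that, the relink/repatch entry points below rewrite the
+    // finished code in place and flush the instruction cache for the affected range.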
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+    }
+
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+        
+        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<char*>(code) + from.m_offset);
+        linkJumpAbsolute(location, location, to);
+    }
+
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+        ASSERT(from.isSet());
+
+        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<char*>(code) + from.m_offset) - 1, to, false);
+    }
+
+    static void linkPointer(void* code, AssemblerLabel where, void* value)
+    {
+        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
+    }
+
+    // The static relink and replace methods can use |from| for both
+    // the write and executable address for call and jump patching
+    // as they're modifying existing (linked) code, so the address being
+    // provided is correct for relative address computation.
+    static void relinkJump(void* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to);
+
+        cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
+    }
+
+    static void relinkJumpToNop(void* from)
+    {
+        relinkJump(from, from);
+    }
+    
+    static void relinkCall(void* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+
+        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
+    }
+    
+    static void* readCallTarget(void* from)
+    {
+        return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
+    }
+
+    static void repatchInt32(void* where, int32_t value)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+        
+        setInt32(where, value, true);
+    }
+    
+    static void repatchCompact(void* where, int32_t offset)
+    {
+        ASSERT(offset >= -255 && offset <= 255);
+
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        
+        offset |= (add << 9);
+        offset |= (1 << 10);
+        offset |= (1 << 11);
+
+        uint16_t* location = reinterpret_cast<uint16_t*>(where);
+        uint16_t instruction = location[1] & ~((1 << 12) - 1);
+        instruction |= offset;
+        performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
+        cacheFlush(location, sizeof(uint16_t) * 2);
+    }
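+    // For example, repatching to offset -8 clears the add bit and keeps 8 in the low byte,
+    // giving an offset field of 0xc08 (bits 10 and 11 set, add bit clear), which is then merged
+    // into the second halfword of the load/store being repatched.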
+
+    static void repatchPointer(void* where, void* value)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+        
+        setPointer(where, value, true);
+    }
+
+    static void* readPointer(void* where)
+    {
+        return reinterpret_cast<void*>(readInt32(where));
+    }
+    
+    static void replaceWithJump(void* instructionStart, void* to)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+        ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
+
+#if OS(LINUX)
+        if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
+            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+            linkJumpT4(ptr, ptr, to);
+            cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+        } else {
+            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
+            linkBX(ptr, ptr, to);
+            cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
+        }
+#else
+        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+        linkJumpT4(ptr, ptr, to);
+        cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+#endif
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+#if OS(LINUX)
+        return 10;
+#else
+        return 4;
+#endif
+    }
+
+    static constexpr ptrdiff_t patchableJumpSize()
+    {
+        return 10;
+    }
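+    // The 10-byte figure above corresponds to the longest jump sequence this assembler plants:
+    // a MOVW/MOVT pair loading the target into ip followed by a BX (see linkBX below), i.e.
+    // five 2-byte halfwords.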
+    
+    static void replaceWithLoad(void* instructionStart)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+        switch (ptr[0] & 0xFFF0) {
+        case OP_LDR_imm_T3:
+            break;
+        case OP_ADD_imm_T3: {
+            ASSERT(!(ptr[1] & 0xF000));
+            uint16_t instructions[2];
+            instructions[0] = ptr[0] & 0x000F;
+            instructions[0] |= OP_LDR_imm_T3;
+            instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4;
+            instructions[1] &= 0xF0FF;
+            performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
+            cacheFlush(ptr, sizeof(uint16_t) * 2);
+            break;
+        }
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    static void replaceWithAddressComputation(void* instructionStart)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+        switch (ptr[0] & 0xFFF0) {
+        case OP_LDR_imm_T3: {
+            ASSERT(!(ptr[1] & 0x0F00));
+            uint16_t instructions[2];
+            instructions[0] = ptr[0] & 0x000F;
+            instructions[0] |= OP_ADD_imm_T3;
+            instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4;
+            instructions[1] &= 0x0FFF;
+            performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
+            cacheFlush(ptr, sizeof(uint16_t) * 2);
+            break;
+        }
+        case OP_ADD_imm_T3:
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+#if OS(LINUX)
+    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+    {
+        asm volatile(
+            "push    {r7}\n"
+            "mov     r0, %0\n"
+            "mov     r1, %1\n"
+            "movw    r7, #0x2\n"
+            "movt    r7, #0xf\n"
+            "movs    r2, #0x0\n"
+            "svc     0x0\n"
+            "pop     {r7}\n"
+            :
+            : "r" (begin), "r" (end)
+            : "r0", "r1", "r2");
+    }
+#endif
+
+    static void cacheFlush(void* code, size_t size)
+    {
+#if OS(IOS)
+        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+        size_t page = pageSize();
+        uintptr_t current = reinterpret_cast<uintptr_t>(code);
+        uintptr_t end = current + size;
+        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+        if (end <= firstPageEnd) {
+            linuxPageFlush(current, end);
+            return;
+        }
+
+        linuxPageFlush(current, firstPageEnd);
+
+        for (current = firstPageEnd; current + page < end; current += page)
+            linuxPageFlush(current, current + page);
+
+        linuxPageFlush(current, end);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+    }
+
+private:
+    // VFP operations commonly take one or more 5-bit operands, typically representing a
+    // floating point register number.  This will commonly be encoded in the instruction
+    // in two parts, with one single bit field, and one 4-bit field.  In the case of
+    // double precision operands the high bit of the register number will be encoded
+    // separately, and for single precision operands the low bit of the register number
+    // will be encoded individually.
+    // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
+    // field to be encoded together in the instruction (the low 4-bits of a double
+    // register number, or the high 4-bits of a single register number), and bit 4
+    // contains the bit value to be encoded individually.
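+    //
+    // As a worked example of that mapping (derived from the constructors below): double register
+    // d9 gives m_value == 9, so bits4() == 9 and bits1() == 0, while single register s3 gives
+    // m_value == ((3 & 1) << 4) | (3 >> 1) == 0x11, so bits4() == 1 and bits1() == 1.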
+    struct VFPOperand {
+        explicit VFPOperand(uint32_t value)
+            : m_value(value)
+        {
+            ASSERT(!(m_value & ~0x1f));
+        }
+
+        VFPOperand(FPDoubleRegisterID reg)
+            : m_value(reg)
+        {
+        }
+
+        VFPOperand(RegisterID reg)
+            : m_value(reg)
+        {
+        }
+
+        VFPOperand(FPSingleRegisterID reg)
+            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
+        {
+        }
+
+        uint32_t bits1()
+        {
+            return m_value >> 4;
+        }
+
+        uint32_t bits4()
+        {
+            return m_value & 0xf;
+        }
+
+        uint32_t m_value;
+    };
+
+    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
+    {
+        // Cannot specify rounding when converting to float.
+        ASSERT(toInteger || !isRoundZero);
+
+        uint32_t op = 0x8;
+        if (toInteger) {
+            // opc2 indicates both toInteger & isUnsigned.
+            op |= isUnsigned ? 0x4 : 0x5;
+            // 'op' field in instruction is isRoundZero
+            if (isRoundZero)
+                op |= 0x10;
+        } else {
+            ASSERT(!isRoundZero);
+            // 'op' field in instruction is isUnsigned
+            if (!isUnsigned)
+                op |= 0x10;
+        }
+        return VFPOperand(op);
+    }
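+
+    // For example, the float-to-unsigned conversion used by vcvt_floatingPointToUnsigned() passes
+    // (toInteger, isUnsigned, isRoundZero) all true, giving op = 0x8 | 0x4 | 0x10 = 0x1c.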
+
+    static void setInt32(void* code, uint32_t value, bool flush)
+    {
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+        uint16_t instructions[4];
+        instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+        instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+
+        performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t));
+        if (flush)
+            cacheFlush(location - 4, 4 * sizeof(uint16_t));
+    }
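+
+    // setInt32 assumes the four halfwords immediately before 'code' are a MOVW/MOVT pair (asserted
+    // above). For example, patching in 0x12345678 rewrites the MOVW immediate to 0x5678 and the
+    // MOVT immediate to 0x1234, keeping whichever destination register was already encoded there.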
+    
+    static int32_t readInt32(void* code)
+    {
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+        
+        ARMThumbImmediate lo16;
+        ARMThumbImmediate hi16;
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
+        uint32_t result = hi16.asUInt16();
+        result <<= 16;
+        result |= lo16.asUInt16();
+        return static_cast(result);
+    }
+
+    static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
+    {
+        // Requires us to have planted a LDR_imm_T1
+        ASSERT(imm.isValid());
+        ASSERT(imm.isUInt7());
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        uint16_t instruction;
+        instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+        instruction |= (imm.getUInt7() >> 2) << 6;
+        performJITMemcpy(location, &instruction, sizeof(uint16_t));
+        cacheFlush(location, sizeof(uint16_t));
+    }
+
+    static void setPointer(void* code, void* value, bool flush)
+    {
+        setInt32(code, reinterpret_cast<uint32_t>(value), flush);
+    }
+
+    static bool isB(const void* address)
+    {
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
+        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+    }
+
+    static bool isBX(const void* address)
+    {
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
+        return (instruction[0] & 0xff87) == OP_BX;
+    }
+
+    static bool isMOV_imm_T3(const void* address)
+    {
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
+        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+    }
+
+    static bool isMOVT(const void* address)
+    {
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
+        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
+    }
+
+    static bool isNOP_T1(const void* address)
+    {
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
+        return instruction[0] == OP_NOP_T1;
+    }
+
+    static bool isNOP_T2(const void* address)
+    {
+        const uint16_t* instruction = static_cast<const uint16_t*>(address);
+        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+    }
+
+    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 23) >> 23) == relative;
+    }
+    
+    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 20) >> 20) == relative;
+    }
+    
+    static bool canBeJumpT3(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        return ((relative << 11) >> 11) == relative;
+    }
+    
+    static bool canBeJumpT4(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        return ((relative << 7) >> 7) == relative;
+    }
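+    // The shift-and-compare idiom in the canBeJumpT* helpers above checks that the displacement
+    // survives truncation to the encoding's signed field width on this 32-bit target; for example,
+    // ((relative << 7) >> 7) == relative holds exactly when 'relative' fits in a signed 25-bit
+    // value, which is the reach of a T4 branch.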
+    
+    static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT1(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+        performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
+    }
+    
+    static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT2(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1);
+        performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
+    }
+    
+    static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT3(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        uint16_t instructions[2];
+        instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+        instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+        performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
+    }
+    
+    static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT4(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+        if (relative >= 0)
+            relative ^= 0xC00000;
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        uint16_t instructions[2];
+        instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+        instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+        performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
+    }
+    
+    static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        uint16_t newInstruction = ifThenElse(cond) | OP_IT;
+        performJITMemcpy(writeTarget - 3, &newInstruction, sizeof(uint16_t));
+        linkJumpT4(writeTarget, instruction, target);
+    }
+    
+    static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+        uint16_t instructions[5];
+        instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+        instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+        instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+
+        performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
+    }
+    
+    static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        linkBX(writeTarget, instruction, target);
+        uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT;
+        performJITMemcpy(writeTarget - 6, &newInstruction, sizeof(uint16_t));
+    }
+    
+    static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+               || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+        if (canBeJumpT4(instruction, target)) {
+            // There may be a better way to fix this, but right now put the NOPs first, since in the
+            // case of a conditional branch this will be coming after an ITTT predicating *three*
+            // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
+            // variable width encoding - the previous instruction might *look* like an ITTT but
+            // actually be the second half of a 2-word op.
+            uint16_t instructions[3];
+            instructions[0] = OP_NOP_T1;
+            instructions[1] = OP_NOP_T2a;
+            instructions[2] = OP_NOP_T2b;
+            performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t));
+            linkJumpT4(writeTarget, instruction, target);
+        } else {
+            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+
+            uint16_t instructions[5];
+            instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+            instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+            instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+            instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+            instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+            performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
+        }
+    }
+    
+    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+    {
+        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+    }
+
+    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
+    {
+        result.m_value.i = (value >> 10) & 1;
+        result.m_value.imm4 = value & 15;
+    }
+
+    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+    {
+        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
+    }
+
+    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
+    {
+        result.m_value.imm3 = (value >> 12) & 7;
+        result.m_value.imm8 = value & 255;
+    }
+
+    class ARMInstructionFormatter {
+    public:
+        ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+        {
+            m_buffer.putShort(op | (rd << 8) | imm);
+        }
+        
+        ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+        {
+            m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+        }
+
+        ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+        {
+            m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+        }
+
+        ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
+        {
+            m_buffer.putShort(op | imm);
+        }
+
+        ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+        {
+            m_buffer.putShort(op | imm);
+        }
+
+        ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+        {
+            m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+        }
+
+        ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+        {
+            m_buffer.putShort(op | imm);
+        }
+
+        ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+        {
+            m_buffer.putShort(op | (reg1 << 3) | reg2);
+        }
+
+        ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+        {
+            m_buffer.putShort(op | reg);
+            m_buffer.putShort(ff.m_u.value);
+        }
+        
+        ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+        {
+            m_buffer.putShort(op);
+            m_buffer.putShort(ff.m_u.value);
+        }
+        
+        ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+        {
+            m_buffer.putShort(op1);
+            m_buffer.putShort(op2);
+        }
+
+        ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
+        {
+            m_buffer.putShort(op1);
+            m_buffer.putShort(imm);
+        }
+        
+        ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+        {
+            ARMThumbImmediate newImm = imm;
+            newImm.m_value.imm4 = imm4;
+
+            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
+        }
+
+        ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+        {
+            m_buffer.putShort(op | reg1);
+            m_buffer.putShort((reg2 << 12) | imm);
+        }
+
+        ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
+        {
+            m_buffer.putShort(op | reg1);
+            m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
+        }
+
+        // Formats up instructions of the pattern:
+        //    111111111B11aaaa:bbbb222SA2C2cccc
+        // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
+        // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
+        ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
+        {
+            ASSERT(!(op1 & 0x004f));
+            ASSERT(!(op2 & 0xf1af));
+            m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
+            m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
+        }
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        // (i.e. +/-(0..255) 32-bit words)
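+        // For example, an imm of -8 bytes is encoded with the 'up' bit clear and an offset field of
+        // 2 (8 >> 2 words); the immediate must be a multiple of 4 in the range -1020..1020.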
+        ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
+        {
+            bool up = true;
+            if (imm < 0) {
+                imm = -imm;
+                up = false;
+            }
+            
+            uint32_t offset = imm;
+            ASSERT(!(offset & ~0x3fc));
+            offset >>= 2;
+
+            m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
+            m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
+        }
+
+        // Administrative methods:
+
+        size_t codeSize() const { return m_buffer.codeSize(); }
+        AssemblerLabel label() const { return m_buffer.label(); }
+        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+        void* data() const { return m_buffer.data(); }
+
+        unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+        AssemblerBuffer m_buffer;
+    } m_formatter;
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
diff --git a/assembler/AbortReason.h b/assembler/AbortReason.h
new file mode 100644
index 0000000..32ae086
--- /dev/null
+++ b/assembler/AbortReason.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+// It's important to not change the values of existing abort reasons unless we really
+// have to. For this reason there is a BASIC-style numbering that should allow us to
+// sneak new reasons in without changing the numbering of existing reasons - at least
+// for a while.
+enum AbortReason {
+    AHCallFrameMisaligned                             =  10,
+    AHIndexingTypeIsValid                             =  20,
+    AHInsaneArgumentCount                             =  30,
+    AHIsNotCell                                       =  40,
+    AHIsNotInt32                                      =  50,
+    AHIsNotJSDouble                                   =  60,
+    AHIsNotJSInt32                                    =  70,
+    AHIsNotJSNumber                                   =  80,
+    AHIsNotNull                                       =  90,
+    AHStackPointerMisaligned                          = 100,
+    AHStructureIDIsValid                              = 110,
+    AHTagMaskNotInPlace                               = 120,
+    AHTagTypeNumberNotInPlace                         = 130,
+    AHTypeInfoInlineTypeFlagsAreValid                 = 140,
+    AHTypeInfoIsValid                                 = 150,
+    B3Oops                                            = 155,
+    DFGBailedAtTopOfBlock                             = 161,
+    DFGBailedAtEndOfNode                              = 162,
+    DFGBasicStorageAllocatorZeroSize                  = 170,
+    DFGIsNotCell                                      = 180,
+    DFGIneffectiveWatchpoint                          = 190,
+    DFGNegativeStringLength                           = 200,
+    DFGSlowPathGeneratorFellThrough                   = 210,
+    DFGUnreachableBasicBlock                          = 220,
+    DFGUnreachableNode                                = 225,
+    DFGUnreasonableOSREntryJumpDestination            = 230,
+    DFGVarargsThrowingPathDidNotThrow                 = 235,
+    FTLCrash                                          = 236,
+    JITDidReturnFromTailCall                          = 237,
+    JITDivOperandsAreNotNumbers                       = 240,
+    JITGetByValResultIsNotEmpty                       = 250,
+    JITNotSupported                                   = 260,
+    JITOffsetIsNotOutOfLine                           = 270,
+    JITUncoughtExceptionAfterCall                     = 275,
+    JITUnexpectedCallFrameSize                        = 277,
+    JITUnreasonableLoopHintJumpTarget                 = 280,
+    RPWUnreasonableJumpTarget                         = 290,
+    RepatchIneffectiveWatchpoint                      = 300,
+    RepatchInsaneArgumentCount                        = 310,
+    TGInvalidPointer                                  = 320,
+    TGNotSupported                                    = 330,
+    YARRNoInputConsumed                               = 340,
+};
+
+} // namespace JSC
diff --git a/assembler/AbstractMacroAssembler.h b/assembler/AbstractMacroAssembler.h
new file mode 100644
index 0000000..37b21d8
--- /dev/null
+++ b/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,1121 @@
+/*
+ * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "AbortReason.h"
+#include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
+#include "CPU.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include "MacroAssemblerHelpers.h"
+#include "Options.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+
+#if ENABLE(ASSEMBLER)
+
+class AllowMacroScratchRegisterUsage;
+class DisallowMacroScratchRegisterUsage;
+class LinkBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
+
+template 
+class AbstractMacroAssembler {
+public:
+    typedef AbstractMacroAssembler AbstractMacroAssemblerType;
+    typedef AssemblerType AssemblerType_T;
+
+    typedef MacroAssemblerCodePtr CodePtr;
+    typedef MacroAssemblerCodeRef CodeRef;
+
+    class Jump;
+
+    typedef typename AssemblerType::RegisterID RegisterID;
+    typedef typename AssemblerType::FPRegisterID FPRegisterID;
+    
+    static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); }
+    static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); }
+
+    static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
+    static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
+
+    // Section 1: MacroAssembler operand types
+    //
+    // The following types are used as operands to MacroAssembler operations,
+    // describing immediate  and memory operands to the instructions to be planted.
+
+    enum Scale {
+        TimesOne,
+        TimesTwo,
+        TimesFour,
+        TimesEight,
+    };
+    
+    static Scale timesPtr()
+    {
+        if (sizeof(void*) == 4)
+            return TimesFour;
+        return TimesEight;
+    }
+    
+    struct BaseIndex;
+    
+    // Address:
+    //
+    // Describes a simple base-offset address.
+    struct Address {
+        explicit Address(RegisterID base, int32_t offset = 0)
+            : base(base)
+            , offset(offset)
+        {
+        }
+        
+        Address withOffset(int32_t additionalOffset)
+        {
+            return Address(base, offset + additionalOffset);
+        }
+        
+        BaseIndex indexedBy(RegisterID index, Scale) const;
+        
+        RegisterID base;
+        int32_t offset;
+    };
+
+    struct ExtendedAddress {
+        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+            : base(base)
+            , offset(offset)
+        {
+        }
+        
+        RegisterID base;
+        intptr_t offset;
+    };
+
+    // ImplicitAddress:
+    //
+    // This class is used for explicit 'load' and 'store' operations
+    // (as opposed to situations in which a memory operand is provided
+    // to a generic operation, such as an integer arithmetic instruction).
+    //
+    // In the case of a load (or store) operation we want to permit
+    // addresses to be implicitly constructed, e.g. the two calls:
+    //
+    //     load32(Address(addrReg), destReg);
+    //     load32(addrReg, destReg);
+    //
+    // Are equivalent, and the explicit wrapping of the Address in the former
+    // is unnecessary.
+    struct ImplicitAddress {
+        ImplicitAddress(RegisterID base)
+            : base(base)
+            , offset(0)
+        {
+        }
+
+        ImplicitAddress(Address address)
+            : base(address.base)
+            , offset(address.offset)
+        {
+        }
+
+        RegisterID base;
+        int32_t offset;
+    };
+
+    // BaseIndex:
+    //
+    // Describes a complex addressing mode.
+    struct BaseIndex {
+        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+            : base(base)
+            , index(index)
+            , scale(scale)
+            , offset(offset)
+        {
+        }
+        
+        RegisterID base;
+        RegisterID index;
+        Scale scale;
+        int32_t offset;
+        
+        BaseIndex withOffset(int32_t additionalOffset)
+        {
+            return BaseIndex(base, index, scale, offset + additionalOffset);
+        }
+    };
+
+    // AbsoluteAddress:
+    //
+    // Describes a memory operand given by a pointer.  For regular load & store
+    // operations an unwrapped void* will be used, rather than using this.
+    struct AbsoluteAddress {
+        explicit AbsoluteAddress(const void* ptr)
+            : m_ptr(ptr)
+        {
+        }
+
+        const void* m_ptr;
+    };
+
+    // TrustedImmPtr:
+    //
+    // A pointer sized immediate operand to an instruction - this is wrapped
+    // in a class requiring explicit construction in order to differentiate
+    // from pointers used as absolute addresses to memory operations
+    struct TrustedImmPtr {
+        TrustedImmPtr() { }
+        
+        explicit TrustedImmPtr(const void* value)
+            : m_value(value)
+        {
+        }
+        
+        // This is only here so that TrustedImmPtr(0) does not confuse the C++
+        // overload handling rules.
+        explicit TrustedImmPtr(int value)
+            : m_value(0)
+        {
+            ASSERT_UNUSED(value, !value);
+        }
+
+        explicit TrustedImmPtr(size_t value)
+            : m_value(reinterpret_cast<void*>(value))
+        {
+        }
+
+        intptr_t asIntptr()
+        {
+            return reinterpret_cast<intptr_t>(m_value);
+        }
+
+        const void* m_value;
+    };
+
+    struct ImmPtr : private TrustedImmPtr
+    {
+        explicit ImmPtr(const void* value)
+            : TrustedImmPtr(value)
+        {
+        }
+
+        TrustedImmPtr asTrustedImmPtr() { return *this; }
+    };
+
+    // TrustedImm32:
+    //
+    // A 32bit immediate operand to an instruction - this is wrapped in a
+    // class requiring explicit construction in order to prevent RegisterIDs
+    // (which are implemented as an enum) from accidentally being passed as
+    // immediate values.
+    struct TrustedImm32 {
+        TrustedImm32() { }
+        
+        explicit TrustedImm32(int32_t value)
+            : m_value(value)
+        {
+        }
+
+#if !CPU(X86_64)
+        explicit TrustedImm32(TrustedImmPtr ptr)
+            : m_value(ptr.asIntptr())
+        {
+        }
+#endif
+
+        int32_t m_value;
+    };
+
+
+    struct Imm32 : private TrustedImm32 {
+        explicit Imm32(int32_t value)
+            : TrustedImm32(value)
+        {
+        }
+#if !CPU(X86_64)
+        explicit Imm32(TrustedImmPtr ptr)
+            : TrustedImm32(ptr)
+        {
+        }
+#endif
+        const TrustedImm32& asTrustedImm32() const { return *this; }
+
+    };
+    
+    // TrustedImm64:
+    //
+    // A 64bit immediate operand to an instruction - this is wrapped in a
+    // class requiring explicit construction in order to prevent RegisterIDs
+    // (which are implemented as an enum) from accidentally being passed as
+    // immediate values.
+    struct TrustedImm64 {
+        TrustedImm64() { }
+        
+        explicit TrustedImm64(int64_t value)
+            : m_value(value)
+        {
+        }
+
+#if CPU(X86_64) || CPU(ARM64)
+        explicit TrustedImm64(TrustedImmPtr ptr)
+            : m_value(ptr.asIntptr())
+        {
+        }
+#endif
+
+        int64_t m_value;
+    };
+
+    struct Imm64 : private TrustedImm64
+    {
+        explicit Imm64(int64_t value)
+            : TrustedImm64(value)
+        {
+        }
+#if CPU(X86_64) || CPU(ARM64)
+        explicit Imm64(TrustedImmPtr ptr)
+            : TrustedImm64(ptr)
+        {
+        }
+#endif
+        const TrustedImm64& asTrustedImm64() const { return *this; }
+    };
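+
+    // The unprefixed Imm32/Imm64/ImmPtr wrappers above mark immediates whose values may
+    // be influenced by untrusted input. A concrete MacroAssembler may apply value
+    // blinding to such operands and uses the asTrustedImm*() accessors to recover the
+    // raw immediate when blinding is not required.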
+    
+    // Section 2: MacroAssembler code buffer handles
+    //
+    // The following types are used to reference items in the code buffer
+    // during JIT code generation.  For example, the type Jump is used to
+    // track the location of a jump instruction so that it may later be
+    // linked to a label marking its destination.
+
+
+    // Label:
+    //
+    // A Label records a point in the generated instruction stream, typically such that
+    // it may be used as a destination for a jump.
+    class Label {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend struct DFG::OSRExit;
+        friend class Jump;
+        friend class MacroAssemblerCodeRef;
+        friend class LinkBuffer;
+        friend class Watchpoint;
+
+    public:
+        Label()
+        {
+        }
+
+        Label(AbstractMacroAssemblerType* masm)
+            : m_label(masm->m_assembler.label())
+        {
+            masm->invalidateAllTempRegisters();
+        }
+
+        bool operator==(const Label& other) const { return m_label == other.m_label; }
+
+        bool isSet() const { return m_label.isSet(); }
+    private:
+        AssemblerLabel m_label;
+    };
+    
+    // ConvertibleLoadLabel:
+    //
+    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+    // so that:
+    //
+    // loadPtr(Address(a, i), b)
+    //
+    // becomes:
+    //
+    // addPtr(TrustedImmPtr(i), a, b)
+    class ConvertibleLoadLabel {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+        
+    public:
+        ConvertibleLoadLabel()
+        {
+        }
+        
+        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
+            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+        {
+        }
+        
+        bool isSet() const { return m_label.isSet(); }
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // DataLabelPtr:
+    //
+    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+    // patched after the code has been generated.
+    class DataLabelPtr {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabelPtr()
+        {
+        }
+
+        DataLabelPtr(AbstractMacroAssemblerType* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+        bool isSet() const { return m_label.isSet(); }
+        
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // DataLabel32:
+    //
+    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
+    // patched after the code has been generated.
+    class DataLabel32 {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabel32()
+        {
+        }
+
+        DataLabel32(AbstractMacroAssemblerType* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+        AssemblerLabel label() const { return m_label; }
+
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // DataLabelCompact:
+    //
+    // A DataLabelCompact is used to refer to a location in the code containing a
+    // compact immediate to be patched after the code has been generated.
+    class DataLabelCompact {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabelCompact()
+        {
+        }
+        
+        DataLabelCompact(AbstractMacroAssemblerType* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+        DataLabelCompact(AssemblerLabel label)
+            : m_label(label)
+        {
+        }
+
+        AssemblerLabel label() const { return m_label; }
+
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // Call:
+    //
+    // A Call object is a reference to a call instruction that has been planted
+    // into the code buffer - it is typically used to link the call, setting the
+    // relative offset such that when executed it will call to the desired
+    // destination.
+    class Call {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+
+    public:
+        enum Flags {
+            None = 0x0,
+            Linkable = 0x1,
+            Near = 0x2,
+            Tail = 0x4,
+            LinkableNear = 0x3,
+            LinkableNearTail = 0x7,
+        };
+
+        Call()
+            : m_flags(None)
+        {
+        }
+        
+        Call(AssemblerLabel jmp, Flags flags)
+            : m_label(jmp)
+            , m_flags(flags)
+        {
+        }
+
+        bool isFlagSet(Flags flag)
+        {
+            return m_flags & flag;
+        }
+
+        static Call fromTailJump(Jump jump)
+        {
+            return Call(jump.m_label, Linkable);
+        }
+
+        AssemblerLabel m_label;
+    private:
+        Flags m_flags;
+    };
+
+    // Jump:
+    //
+    // A jump object is a reference to a jump instruction that has been planted
+    // into the code buffer - it is typically used to link the jump, setting the
+    // relative offset such that when executed it will jump to the desired
+    // destination.
+    class Jump {
+        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class Call;
+        friend struct DFG::OSRExit;
+        friend class LinkBuffer;
+    public:
+        Jump()
+        {
+        }
+        
+#if CPU(ARM_THUMB2)
+        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
+        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+#elif CPU(ARM64)
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_is64Bit(is64Bit)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_bitNumber(bitNumber)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
+        }
+#elif CPU(SH4)
+        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+            : m_label(jmp)
+            , m_type(type)
+        {
+        }
+#else
+        Jump(AssemblerLabel jmp)    
+            : m_label(jmp)
+        {
+        }
+#endif
+        
+        Label label() const
+        {
+            Label result;
+            result.m_label = m_label;
+            return result;
+        }
+
+        void link(AbstractMacroAssemblerType* masm) const
+        {
+            masm->invalidateAllTempRegisters();
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
+        }
+        
+        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
+        {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+            masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
+        }
+
+        bool isSet() const { return m_label.isSet(); }
+
+    private:
+        AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+        ARMv7Assembler::JumpType m_type;
+        ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+        ARM64Assembler::JumpType m_type;
+        ARM64Assembler::Condition m_condition;
+        bool m_is64Bit;
+        unsigned m_bitNumber;
+        ARM64Assembler::RegisterID m_compareRegister;
+#endif
+#if CPU(SH4)
+        SH4Assembler::JumpType m_type;
+#endif
+    };
+
+    struct PatchableJump {
+        PatchableJump()
+        {
+        }
+
+        explicit PatchableJump(Jump jump)
+            : m_jump(jump)
+        {
+        }
+
+        operator Jump&() { return m_jump; }
+
+        Jump m_jump;
+    };
+
+    // JumpList:
+    //
+    // A JumpList is a set of Jump objects.
+    // All jumps in the set will be linked to the same destination.
+    class JumpList {
+    public:
+        typedef Vector<Jump, 2> JumpVector;
+        
+        JumpList() { }
+        
+        JumpList(Jump jump)
+        {
+            if (jump.isSet())
+                append(jump);
+        }
+
+        void link(AbstractMacroAssemblerType* masm) const
+        {
+            size_t size = m_jumps.size();
+            for (size_t i = 0; i < size; ++i)
+                m_jumps[i].link(masm);
+        }
+        
+        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
+        {
+            size_t size = m_jumps.size();
+            for (size_t i = 0; i < size; ++i)
+                m_jumps[i].linkTo(label, masm);
+        }
+        
+        void append(Jump jump)
+        {
+            m_jumps.append(jump);
+        }
+        
+        void append(const JumpList& other)
+        {
+            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+        }
+
+        bool empty()
+        {
+            return !m_jumps.size();
+        }
+        
+        void clear()
+        {
+            m_jumps.clear();
+        }
+        
+        const JumpVector& jumps() const { return m_jumps; }
+
+    private:
+        JumpVector m_jumps;
+    };
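+
+    // Illustrative JumpList usage sketch ('jit' is an assumed concrete MacroAssembler and
+    // the branch calls are placeholders, not taken from this patch):
+    //
+    //     JumpList failureCases;
+    //     failureCases.append(jit.branchTest32(...));
+    //     failureCases.append(jit.branchTest32(...));
+    //     // ... emit the code reached when every branch falls through ...
+    //     failureCases.link(&jit); // every collected jump now targets this point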
+
+
+    // Section 3: Misc admin methods
+#if ENABLE(DFG_JIT)
+    Label labelIgnoringWatchpoints()
+    {
+        Label result;
+        result.m_label = m_assembler.labelIgnoringWatchpoints();
+        return result;
+    }
+#else
+    Label labelIgnoringWatchpoints()
+    {
+        return label();
+    }
+#endif
+    
+    Label label()
+    {
+        return Label(this);
+    }
+    
+    void padBeforePatch()
+    {
+        // Rely on the fact that asking for a label already does the padding.
+        (void)label();
+    }
+    
+    Label watchpointLabel()
+    {
+        Label result;
+        result.m_label = m_assembler.labelForWatchpoint();
+        return result;
+    }
+    
+    Label align()
+    {
+        m_assembler.align(16);
+        return Label(this);
+    }
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    class RegisterAllocationOffset {
+    public:
+        RegisterAllocationOffset(unsigned offset)
+            : m_offset(offset)
+        {
+        }
+
+        void checkOffsets(unsigned low, unsigned high)
+        {
+            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+        }
+
+    private:
+        unsigned m_offset;
+    };
+
+    void addRegisterAllocationAtOffset(unsigned offset)
+    {
+        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
+    }
+
+    void clearRegisterAllocationOffsets()
+    {
+        m_registerAllocationForOffsets.clear();
+    }
+
+    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
+    {
+        if (offset1 > offset2)
+            std::swap(offset1, offset2);
+
+        size_t size = m_registerAllocationForOffsets.size();
+        for (size_t i = 0; i < size; ++i)
+            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
+    }
+#endif
+
+    template<typename T, typename U>
+    static ptrdiff_t differenceBetween(T from, U to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+    {
+        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
+    }
+
+    unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
+    {
+        AssemblerType::cacheFlush(code, size);
+    }
+
+#if ENABLE(MASM_PROBE)
+
+    struct CPUState {
+        #define DECLARE_REGISTER(_type, _regName) \
+            _type _regName;
+        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+        #undef DECLARE_REGISTER
+
+        static const char* gprName(RegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case RegisterID::_regName: \
+                    return #_regName;
+                FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+
+        static const char* fprName(FPRegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case FPRegisterID::_regName: \
+                    return #_regName;
+                FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+
+        void*& gpr(RegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case RegisterID::_regName: \
+                    return _regName;
+                FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+
+        double& fpr(FPRegisterID regID)
+        {
+            switch (regID) {
+                #define DECLARE_REGISTER(_type, _regName) \
+                case FPRegisterID::_regName: \
+                    return _regName;
+                FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+                #undef DECLARE_REGISTER
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+    };
+
+    struct ProbeContext;
+    typedef void (*ProbeFunction)(struct ProbeContext*);
+
+    struct ProbeContext {
+        ProbeFunction probeFunction;
+        void* arg1;
+        void* arg2;
+        CPUState cpu;
+
+        // Convenience methods:
+        void*& gpr(RegisterID regID) { return cpu.gpr(regID); }
+        double& fpr(FPRegisterID regID) { return cpu.fpr(regID); }
+        const char* gprName(RegisterID regID) { return cpu.gprName(regID); }
+        const char* fprName(FPRegisterID regID) { return cpu.fprName(regID); }
+    };
+
+    // This function emits code to preserve the CPUState (e.g. registers),
+    // call a user supplied probe function, and restore the CPUState before
+    // continuing with other JIT generated code.
+    //
+    // The user supplied probe function will be called with a single pointer to
+    // a ProbeContext struct (defined above) which contains, among other things,
+    // the preserved CPUState. This allows the user probe function to inspect
+    // the CPUState at that point in the JIT generated code.
+    //
+    // If the user probe function alters the register values in the ProbeContext,
+    // the altered values will be loaded into the CPU registers when the probe
+    // returns.
+    //
+    // The ProbeContext is stack allocated and is only valid for the duration
+    // of the call to the user probe function.
+    //
+    // Note: probe() should be implemented by the target specific MacroAssembler.
+    // This prototype is only provided here to document the interface.
+
+    void probe(ProbeFunction, void* arg1, void* arg2);
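+
+    // Illustrative probe() usage sketch (assumes ENABLE(MASM_PROBE) and an x86 target so
+    // that X86Registers::esp names a valid GPR; 'jit' is an assumed concrete MacroAssembler):
+    //
+    //     static void dumpStackPointer(ProbeContext* context)
+    //     {
+    //         dataLogF("sp = %p\n", context->gpr(X86Registers::esp));
+    //     }
+    //     ...
+    //     jit.probe(dumpStackPointer, nullptr, nullptr);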
+
+#endif // ENABLE(MASM_PROBE)
+
+    AssemblerType m_assembler;
+    
+    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+    {
+        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
+    }
+
+    static void linkPointer(void* code, AssemblerLabel label, void* value)
+    {
+        AssemblerType::linkPointer(code, label, value);
+    }
+
+    static void* getLinkerAddress(void* code, AssemblerLabel label)
+    {
+        return AssemblerType::getRelocatedAddress(code, label);
+    }
+
+    static unsigned getLinkerCallReturnOffset(Call call)
+    {
+        return AssemblerType::getCallReturnOffset(call.m_label);
+    }
+
+    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+    {
+        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+    }
+    
+    static void repatchJumpToNop(CodeLocationJump jump)
+    {
+        AssemblerType::relinkJumpToNop(jump.dataLocation());
+    }
+
+    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+    {
+        switch (nearCall.callMode()) {
+        case NearCallMode::Tail:
+            AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation());
+            return;
+        case NearCallMode::Regular:
+            AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+            return;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+    {
+        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+    }
+    
+    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+    {
+        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+    }
+
+    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+    {
+        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+    }
+    
+    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+    {
+        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+    }
+    
+    static void replaceWithLoad(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithLoad(label.dataLocation());
+    }
+    
+    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithAddressComputation(label.dataLocation());
+    }
+
+    template<typename Functor>
+    void addLinkTask(const Functor& functor)
+    {
+        m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor));
+    }
+
+    void emitNops(size_t memoryToFillWithNopsInBytes)
+    {
+        AssemblerBuffer& buffer = m_assembler.buffer();
+        size_t startCodeSize = buffer.codeSize();
+        size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes;
+        buffer.ensureSpace(memoryToFillWithNopsInBytes);
+        bool isCopyingToExecutableMemory = false;
+        AssemblerType::fillNops(static_cast<char*>(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes, isCopyingToExecutableMemory);
+        buffer.setCodeSize(targetCodeSize);
+    }
+
+protected:
+    AbstractMacroAssembler()
+        : m_randomSource(0)
+    {
+        invalidateAllTempRegisters();
+    }
+
+    uint32_t random()
+    {
+        if (!m_randomSourceIsInitialized) {
+            m_randomSourceIsInitialized = true;
+            m_randomSource.setSeed(cryptographicallyRandomNumber());
+        }
+        return m_randomSource.getUint32();
+    }
+
+    bool m_randomSourceIsInitialized { false };
+    WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+    static bool haveScratchRegisterForBlinding()
+    {
+        return false;
+    }
+    static RegisterID scratchRegisterForBlinding()
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return firstRegister();
+    }
+    static bool canBlind() { return false; }
+    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
+    static bool shouldBlindForSpecificArch(uint64_t) { return false; }
+
+    class CachedTempRegister {
+        friend class DataLabelPtr;
+        friend class DataLabel32;
+        friend class DataLabelCompact;
+        friend class Jump;
+        friend class Label;
+
+    public:
+        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
+            : m_masm(masm)
+            , m_registerID(registerID)
+            , m_value(0)
+            , m_validBit(1 << static_cast<unsigned>(registerID))
+        {
+            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
+        }
+
+        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+        bool value(intptr_t& value)
+        {
+            value = m_value;
+            return m_masm->isTempRegisterValid(m_validBit);
+        }
+
+        void setValue(intptr_t value)
+        {
+            m_value = value;
+            m_masm->setTempRegisterValid(m_validBit);
+        }
+
+        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+    private:
+        AbstractMacroAssemblerType* m_masm;
+        RegisterID m_registerID;
+        intptr_t m_value;
+        unsigned m_validBit;
+    };
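+
+    // Each CachedTempRegister owns a single bit in m_tempRegistersValidBits. While that
+    // bit is set, m_value mirrors the constant last materialized into the register, so a
+    // concrete assembler can skip re-loading it; taking a Label or linking a Jump
+    // conservatively clears every bit via invalidateAllTempRegisters().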
+
+    ALWAYS_INLINE void invalidateAllTempRegisters()
+    {
+        m_tempRegistersValidBits = 0;
+    }
+
+    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+    {
+        return (m_tempRegistersValidBits & registerMask);
+    }
+
+    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits &=  ~registerMask;
+    }
+
+    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits |= registerMask;
+    }
+
+    friend class AllowMacroScratchRegisterUsage;
+    friend class DisallowMacroScratchRegisterUsage;
+    unsigned m_tempRegistersValidBits;
+    bool m_allowScratchRegister { true };
+
+    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;
+
+    friend class LinkBuffer;
+}; // class AbstractMacroAssembler
+
+template <class AssemblerType, class MacroAssemblerType>
+inline typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::BaseIndex
+AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Address::indexedBy(
+    typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::RegisterID index,
+    typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Scale scale) const
+{
+    return BaseIndex(base, index, scale, offset);
+}
+
+#endif // ENABLE(ASSEMBLER)
+
+} // namespace JSC
diff --git a/assembler/AllowMacroScratchRegisterUsage.h b/assembler/AllowMacroScratchRegisterUsage.h
new file mode 100644
index 0000000..ed7806c
--- /dev/null
+++ b/assembler/AllowMacroScratchRegisterUsage.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+class AllowMacroScratchRegisterUsage {
+public:
+    AllowMacroScratchRegisterUsage(MacroAssembler& masm)
+        : m_masm(masm)
+        , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister)
+    {
+        masm.m_allowScratchRegister = true;
+    }
+
+    ~AllowMacroScratchRegisterUsage()
+    {
+        m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister;
+    }
+
+private:
+    MacroAssembler& m_masm;
+    bool m_oldValueOfAllowScratchRegister;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/AssemblerBuffer.h b/assembler/AssemblerBuffer.h
new file mode 100644
index 0000000..7340952
--- /dev/null
+++ b/assembler/AssemblerBuffer.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "ExecutableAllocator.h"
+#include "JITCompilationEffort.h"
+#include "stdint.h"
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+    struct AssemblerLabel {
+        AssemblerLabel()
+            : m_offset(std::numeric_limits<uint32_t>::max())
+        {
+        }
+
+        explicit AssemblerLabel(uint32_t offset)
+            : m_offset(offset)
+        {
+        }
+
+        bool isSet() const { return (m_offset != std::numeric_limits<uint32_t>::max()); }
+
+        AssemblerLabel labelAtOffset(int offset) const
+        {
+            return AssemblerLabel(m_offset + offset);
+        }
+
+        bool operator==(const AssemblerLabel& other) const { return m_offset == other.m_offset; }
+
+        uint32_t m_offset;
+    };
+
+    class AssemblerData {
+        WTF_MAKE_NONCOPYABLE(AssemblerData);
+        static const size_t InlineCapacity = 128;
+    public:
+        AssemblerData()
+            : m_buffer(m_inlineBuffer)
+            , m_capacity(InlineCapacity)
+        {
+        }
+
+        AssemblerData(size_t initialCapacity)
+        {
+            if (initialCapacity <= InlineCapacity) {
+                m_capacity = InlineCapacity;
+                m_buffer = m_inlineBuffer;
+            } else {
+                m_capacity = initialCapacity;
+                m_buffer = static_cast<char*>(fastMalloc(m_capacity));
+            }
+        }
+
+        AssemblerData(AssemblerData&& other)
+        {
+            if (other.isInlineBuffer()) {
+                ASSERT(other.m_capacity == InlineCapacity);
+                memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity);
+                m_buffer = m_inlineBuffer;
+            } else
+                m_buffer = other.m_buffer;
+            m_capacity = other.m_capacity;
+
+            other.m_buffer = nullptr;
+            other.m_capacity = 0;
+        }
+
+        AssemblerData& operator=(AssemblerData&& other)
+        {
+            if (m_buffer && !isInlineBuffer())
+                fastFree(m_buffer);
+
+            if (other.isInlineBuffer()) {
+                ASSERT(other.m_capacity == InlineCapacity);
+                memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity);
+                m_buffer = m_inlineBuffer;
+            } else
+                m_buffer = other.m_buffer;
+            m_capacity = other.m_capacity;
+
+            other.m_buffer = nullptr;
+            other.m_capacity = 0;
+            return *this;
+        }
+
+        ~AssemblerData()
+        {
+            if (m_buffer && !isInlineBuffer())
+                fastFree(m_buffer);
+        }
+
+        char* buffer() const { return m_buffer; }
+
+        unsigned capacity() const { return m_capacity; }
+
+        void grow(unsigned extraCapacity = 0)
+        {
+            m_capacity = m_capacity + m_capacity / 2 + extraCapacity;
+            if (isInlineBuffer()) {
+                m_buffer = static_cast<char*>(fastMalloc(m_capacity));
+                memcpy(m_buffer, m_inlineBuffer, InlineCapacity);
+            } else
+                m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
+        }
+
+    private:
+        bool isInlineBuffer() const { return m_buffer == m_inlineBuffer; }
+        char* m_buffer;
+        char m_inlineBuffer[InlineCapacity];
+        unsigned m_capacity;
+    };
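+
+    // AssemblerData growth is geometric: each grow() call moves to roughly 1.5x the
+    // previous capacity plus any explicitly requested extra bytes, so the 128-byte
+    // inline buffer is first promoted to a 192-byte heap allocation.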
+
+    class AssemblerBuffer {
+    public:
+        AssemblerBuffer()
+            : m_storage()
+            , m_index(0)
+        {
+        }
+
+        bool isAvailable(unsigned space)
+        {
+            return m_index + space <= m_storage.capacity();
+        }
+
+        void ensureSpace(unsigned space)
+        {
+            while (!isAvailable(space))
+                outOfLineGrow();
+        }
+
+        bool isAligned(int alignment) const
+        {
+            return !(m_index & (alignment - 1));
+        }
+
+        void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+        void putByte(int8_t value) { putIntegral(value); }
+        void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+        void putShort(int16_t value) { putIntegral(value); }
+        void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+        void putInt(int32_t value) { putIntegral(value); }
+        void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+        void putInt64(int64_t value) { putIntegral(value); }
+
+        void* data() const
+        {
+            return m_storage.buffer();
+        }
+
+        size_t codeSize() const
+        {
+            return m_index;
+        }
+
+        void setCodeSize(size_t index)
+        {
+            // Warning: Only use this if you know exactly what you are doing.
+            // For example, say you want 40 bytes of nops, it's ok to grow
+            // and then fill 40 bytes of nops using bigger instructions.
+            m_index = index;
+            ASSERT(m_index <= m_storage.capacity());
+        }
+
+        AssemblerLabel label() const
+        {
+            return AssemblerLabel(m_index);
+        }
+
+        unsigned debugOffset() { return m_index; }
+
+        AssemblerData&& releaseAssemblerData() { return WTFMove(m_storage); }
+
+        // LocalWriter is a trick to keep the storage buffer and the index
+        // in local variables while issuing multiple stores.
+        // It is created in a block scope and its attributes can stay live
+        // between writes.
+        //
+        // LocalWriter *CANNOT* be mixed with other types of access to AssemblerBuffer.
+        // AssemblerBuffer cannot be used until its LocalWriter goes out of scope.
+        class LocalWriter {
+        public:
+            LocalWriter(AssemblerBuffer& buffer, unsigned requiredSpace)
+                : m_buffer(buffer)
+            {
+                buffer.ensureSpace(requiredSpace);
+                m_storageBuffer = buffer.m_storage.buffer();
+                m_index = buffer.m_index;
+#if !defined(NDEBUG)
+                m_initialIndex = m_index;
+                m_requiredSpace = requiredSpace;
+#endif
+            }
+
+            ~LocalWriter()
+            {
+                ASSERT(m_index - m_initialIndex <= m_requiredSpace);
+                ASSERT(m_buffer.m_index == m_initialIndex);
+                ASSERT(m_storageBuffer == m_buffer.m_storage.buffer());
+                m_buffer.m_index = m_index;
+            }
+
+            void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+            void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+            void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+            void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+        private:
+            template<typename IntegralType>
+            void putIntegralUnchecked(IntegralType value)
+            {
+                ASSERT(m_index + sizeof(IntegralType) <= m_buffer.m_storage.capacity());
+                *reinterpret_cast_ptr<IntegralType*>(m_storageBuffer + m_index) = value;
+                m_index += sizeof(IntegralType);
+            }
+            AssemblerBuffer& m_buffer;
+            char* m_storageBuffer;
+            unsigned m_index;
+#if !defined(NDEBUG)
+            unsigned m_initialIndex;
+            unsigned m_requiredSpace;
+#endif
+        };
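+
+        // Illustrative LocalWriter usage sketch (the word values are placeholders):
+        //
+        //     {
+        //         AssemblerBuffer::LocalWriter writer(buffer, 2 * sizeof(int32_t));
+        //         writer.putIntUnchecked(firstWord);
+        //         writer.putIntUnchecked(secondWord);
+        //     } // the destructor publishes the advanced index back to 'buffer'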
+
+    protected:
+        template<typename IntegralType>
+        void putIntegral(IntegralType value)
+        {
+            unsigned nextIndex = m_index + sizeof(IntegralType);
+            if (UNLIKELY(nextIndex > m_storage.capacity()))
+                outOfLineGrow();
+            ASSERT(isAvailable(sizeof(IntegralType)));
+            *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
+            m_index = nextIndex;
+        }
+
+        template<typename IntegralType>
+        void putIntegralUnchecked(IntegralType value)
+        {
+            ASSERT(isAvailable(sizeof(IntegralType)));
+            *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value;
+            m_index += sizeof(IntegralType);
+        }
+
+        void append(const char* data, int size)
+        {
+            if (!isAvailable(size))
+                grow(size);
+
+            memcpy(m_storage.buffer() + m_index, data, size);
+            m_index += size;
+        }
+
+        void grow(int extraCapacity = 0)
+        {
+            m_storage.grow(extraCapacity);
+        }
+
+    private:
+        NEVER_INLINE void outOfLineGrow()
+        {
+            m_storage.grow();
+        }
+
+        friend LocalWriter;
+
+        AssemblerData m_storage;
+        unsigned m_index;
+    };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/AssemblerBufferWithConstantPool.h b/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 0000000..3b63288
--- /dev/null
+++ b/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+    A constant pool can store 4- or 8-byte values. The values can be
+    constants or addresses. The addresses should be 32 or 64 bits. The constants
+    should be double-precision floats or integer numbers which are hard to
+    encode as a few machine instructions.
+
+    TODO: The pool is designed to handle both 32- and 64-bit values, but
+    currently only the 4-byte constants are implemented and tested.
+
+    The AssemblerBuffer can contain multiple constant pools. Each pool is inserted
+    into the instruction stream - protected by a jump instruction from the
+    execution flow.
+
+    The flush mechanism is called when no space remains to insert the next instruction
+    into the pool. Three values are used to determine when the constant pool itself
+    has to be inserted into the instruction stream (Assembler Buffer):
+
+    - maxPoolSize: size of the constant pool in bytes, this value cannot be
+        larger than the maximum offset of a PC relative memory load
+
+    - barrierSize: size of jump instruction in bytes which protects the
+        constant pool from execution
+
+    - maxInstructionSize: maximum length of a machine instruction in bytes
+
+    There are some callbacks which solve the target architecture specific
+    address handling:
+
+    - TYPE patchConstantPoolLoad(TYPE load, int value):
+        patch the 'load' instruction with the index of the constant in the
+        constant pool and return the patched instruction.
+
+    - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+        patch the PC-relative load instruction at address 'loadAddr' with the
+        final relative offset. The offset can be computed with the help of
+        'constPoolAddr' (the address of the constant pool) and the index of the
+        constant (which was stored previously in the load instruction itself).
+
+    - TYPE placeConstantPoolBarrier(int size):
+        return a constant pool barrier instruction which jumps over the
+        constant pool.
+
+    The 'put*WithConstant*' functions should be used to place data into the
+    constant pool.
+*/
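+
+/*
+    Illustrative instantiation sketch (the pool size, barrier size, instruction size and
+    the assembler name are examples only, not taken from a real target):
+
+        typedef AssemblerBufferWithConstantPool<4096, 4, 4, ExampleAssembler> OutBuffer;
+
+    where ExampleAssembler supplies the patchConstantPoolLoad() and
+    placeConstantPoolBarrier() callbacks described above.
+*/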
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool : public AssemblerBuffer {
+    typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+    using AssemblerBuffer::putIntegral;
+    using AssemblerBuffer::putIntegralUnchecked;
+public:
+    typedef struct {
+        short high;
+        short low;
+    } TwoShorts;
+
+    enum {
+        UniqueConst,
+        ReusableConst,
+        UnusedEntry,
+    };
+
+    AssemblerBufferWithConstantPool()
+        : AssemblerBuffer()
+        , m_numConsts(0)
+        , m_maxDistance(maxPoolSize)
+        , m_lastConstDelta(0)
+    {
+        m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+        m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+    }
+
+    ~AssemblerBufferWithConstantPool()
+    {
+        fastFree(m_mask);
+        fastFree(m_pool);
+    }
+
+    void ensureSpace(int space)
+    {
+        flushIfNoSpaceFor(space);
+        AssemblerBuffer::ensureSpace(space);
+    }
+
+    void ensureSpace(int insnSpace, int constSpace)
+    {
+        flushIfNoSpaceFor(insnSpace, constSpace);
+        AssemblerBuffer::ensureSpace(insnSpace);
+    }
+
+    void ensureSpaceForAnyInstruction(int amount = 1)
+    {
+        flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t));
+    }
+
+    bool isAligned(int alignment)
+    {
+        flushIfNoSpaceFor(alignment);
+        return AssemblerBuffer::isAligned(alignment);
+    }
+
+    void putByteUnchecked(int value)
+    {
+        AssemblerBuffer::putByteUnchecked(value);
+        correctDeltas(1);
+    }
+
+    void putByte(int value)
+    {
+        flushIfNoSpaceFor(1);
+        AssemblerBuffer::putByte(value);
+        correctDeltas(1);
+    }
+
+    void putShortUnchecked(int value)
+    {
+        AssemblerBuffer::putShortUnchecked(value);
+        correctDeltas(2);
+    }
+
+    void putShort(int value)
+    {
+        flushIfNoSpaceFor(2);
+        AssemblerBuffer::putShort(value);
+        correctDeltas(2);
+    }
+
+    void putIntUnchecked(int value)
+    {
+        AssemblerBuffer::putIntUnchecked(value);
+        correctDeltas(4);
+    }
+
+    void putInt(int value)
+    {
+        flushIfNoSpaceFor(4);
+        AssemblerBuffer::putInt(value);
+        correctDeltas(4);
+    }
+
+    void putInt64Unchecked(int64_t value)
+    {
+        AssemblerBuffer::putInt64Unchecked(value);
+        correctDeltas(8);
+    }
+
+    void putIntegral(TwoShorts value)
+    {
+        putIntegral(value.high);
+        putIntegral(value.low);
+    }
+
+    void putIntegralUnchecked(TwoShorts value)
+    {
+        putIntegralUnchecked(value.high);
+        putIntegralUnchecked(value.low);
+    }
+
+    void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
+    {
+        putIntegralWithConstantInt(insn, constant, isReusable);
+    }
+
+    void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+    {
+        putIntegralWithConstantInt(insn, constant, isReusable);
+    }
+
+    // This flushing mechanism can be called after any unconditional jumps.
+    void flushWithoutBarrier(bool isForced = false)
+    {
+        // Flush if constant pool is more than 60% full to avoid overuse of this function.
+        if (isForced || 5 * static_cast<uint32_t>(m_numConsts) > 3 * maxPoolSize / sizeof(uint32_t))
+            flushConstantPool(false);
+    }
+
+    uint32_t* poolAddress()
+    {
+        return m_pool;
+    }
+
+    int sizeOfConstantPool()
+    {
+        return m_numConsts;
+    }
+
+    void flushConstantPool(bool useBarrier = true)
+    {
+        if (!m_numConsts)
+            return;
+        int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+        if (alignPool)
+            alignPool = sizeof(uint64_t) - alignPool;
+
+        // Callback to protect the constant pool from execution
+        if (useBarrier)
+            putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+        if (alignPool) {
+            if (alignPool & 1)
+                AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+            if (alignPool & 2)
+                AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+            if (alignPool & 4)
+                AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+        }
+
+        int constPoolOffset = codeSize();
+        append(reinterpret_cast(m_pool), m_numConsts * sizeof(uint32_t));
+
+        // Patch each PC relative load
+        for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+            void* loadAddr = reinterpret_cast<char*>(data()) + *iter;
+            AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset);
+        }
+
+        m_loadOffsets.clear();
+        m_numConsts = 0;
+    }
+
+private:
+    void correctDeltas(int insnSize)
+    {
+        m_maxDistance -= insnSize;
+        m_lastConstDelta -= insnSize;
+        if (m_lastConstDelta < 0)
+            m_lastConstDelta = 0;
+    }
+
+    void correctDeltas(int insnSize, int constSize)
+    {
+        correctDeltas(insnSize);
+
+        m_maxDistance -= m_lastConstDelta;
+        m_lastConstDelta = constSize;
+    }
+
+    template<typename IntegralType>
+    void putIntegralWithConstantInt(IntegralType insn, uint32_t constant, bool isReusable)
+    {
+        if (!m_numConsts)
+            m_maxDistance = maxPoolSize;
+        flushIfNoSpaceFor(sizeof(IntegralType), 4);
+
+        m_loadOffsets.append(codeSize());
+        if (isReusable) {
+            for (int i = 0; i < m_numConsts; ++i) {
+                if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+                    putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, i)));
+                    correctDeltas(sizeof(IntegralType));
+                    return;
+                }
+            }
+        }
+
+        m_pool[m_numConsts] = constant;
+        m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+        putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, m_numConsts)));
+        ++m_numConsts;
+
+        correctDeltas(sizeof(IntegralType), 4);
+    }
+
+    void flushIfNoSpaceFor(int nextInsnSize)
+    {
+        if (m_numConsts == 0)
+            return;
+        int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+        if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+            flushConstantPool();
+    }
+
+    void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+    {
+        if (m_numConsts == 0)
+            return;
+        if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+            (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+            flushConstantPool();
+    }
+
+    uint32_t* m_pool;
+    char* m_mask;
+    LoadOffsets m_loadOffsets;
+
+    int m_numConsts;
+    int m_maxDistance;
+    int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/AssemblerCommon.h b/assembler/AssemblerCommon.h
new file mode 100644
index 0000000..2c6cb35
--- /dev/null
+++ b/assembler/AssemblerCommon.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+ALWAYS_INLINE bool isIOS()
+{
+#if PLATFORM(IOS)
+    return true;
+#else
+    return false;
+#endif
+}
+
+ALWAYS_INLINE bool isInt9(int32_t value)
+{
+    return value == ((value << 23) >> 23);
+}
+
+template<typename Type>
+ALWAYS_INLINE bool isUInt12(Type value)
+{
+    return !(value & ~static_cast<Type>(0xfff));
+}
+
+template<int datasize>
+ALWAYS_INLINE bool isValidScaledUImm12(int32_t offset)
+{
+    int32_t maxPImm = 4095 * (datasize / 8);
+    if (offset < 0)
+        return false;
+    if (offset > maxPImm)
+        return false;
+    if (offset & ((datasize / 8) - 1))
+        return false;
+    return true;
+}
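+
+// For example, with datasize == 32 the valid offsets are the multiples of 4 in the
+// range [0, 16380] (4095 * 4); negative, unaligned or larger offsets are rejected.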
+
+ALWAYS_INLINE bool isValidSignedImm9(int32_t value)
+{
+    return isInt9(value);
+}
+
+class ARM64LogicalImmediate {
+public:
+    static ARM64LogicalImmediate create32(uint32_t value)
+    {
+        // Check for 0, -1 - these cannot be encoded.
+        if (!value || !~value)
+            return InvalidLogicalImmediate;
+
+        // First look for a 32-bit pattern, then for repeating 16-bit
+        // patterns, 8-bit, 4-bit, and finally 2-bit.
+
+        unsigned hsb, lsb;
+        bool inverted;
+        if (findBitRange<32>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<32>(hsb, lsb, inverted);
+
+        if ((value & 0xffff) != (value >> 16))
+            return InvalidLogicalImmediate;
+        value &= 0xffff;
+
+        if (findBitRange<16>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<16>(hsb, lsb, inverted);
+
+        if ((value & 0xff) != (value >> 8))
+            return InvalidLogicalImmediate;
+        value &= 0xff;
+
+        if (findBitRange<8>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<8>(hsb, lsb, inverted);
+
+        if ((value & 0xf) != (value >> 4))
+            return InvalidLogicalImmediate;
+        value &= 0xf;
+
+        if (findBitRange<4>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<4>(hsb, lsb, inverted);
+
+        if ((value & 0x3) != (value >> 2))
+            return InvalidLogicalImmediate;
+        value &= 0x3;
+
+        if (findBitRange<2>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<2>(hsb, lsb, inverted);
+
+        return InvalidLogicalImmediate;
+    }
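+
+    // For example, create32(0x00ffff00) succeeds (one contiguous run of sixteen set bits),
+    // while create32(0x00ff00f0) fails: its set bits neither form a single contiguous
+    // range nor repeat with a 16-bit (or smaller) period.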
+
+    static ARM64LogicalImmediate create64(uint64_t value)
+    {
+        // Check for 0, -1 - these cannot be encoded.
+        if (!value || !~value)
+            return InvalidLogicalImmediate;
+
+        // Look for a contiguous bit range.
+        unsigned hsb, lsb;
+        bool inverted;
+        if (findBitRange<64>(value, hsb, lsb, inverted))
+            return encodeLogicalImmediate<64>(hsb, lsb, inverted);
+
+        // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
+        if (static_cast(value) == static_cast(value >> 32))
+            return create32(static_cast(value));
+        return InvalidLogicalImmediate;
+    }
+
+    int value() const
+    {
+        ASSERT(isValid());
+        return m_value;
+    }
+
+    bool isValid() const
+    {
+        return m_value != InvalidLogicalImmediate;
+    }
+
+    bool is64bit() const
+    {
+        return m_value & (1 << 12);
+    }
+
+private:
+    ARM64LogicalImmediate(int value)
+        : m_value(value)
+    {
+    }
+
+    // Generate a mask with bits in the range hsb..0 set, for example:
+    //   hsb:63 = 0xffffffffffffffff
+    //   hsb:42 = 0x000007ffffffffff
+    //   hsb: 0 = 0x0000000000000001
+    static uint64_t mask(unsigned hsb)
+    {
+        ASSERT(hsb < 64);
+        return 0xffffffffffffffffull >> (63 - hsb);
+    }
+
+    template<unsigned N>
+    static void partialHSB(uint64_t& value, unsigned& result)
+    {
+        if (value & (0xffffffffffffffffull << N)) {
+            result += N;
+            value >>= N;
+        }
+    }
+
+    // Find the bit number of the highest bit set in a non-zero value, for example:
+    //   0x8080808080808080 = hsb:63
+    //   0x0000000000000001 = hsb: 0
+    //   0x000007ffffe00000 = hsb:42
+    static unsigned highestSetBit(uint64_t value)
+    {
+        ASSERT(value);
+        unsigned hsb = 0;
+        partialHSB<32>(value, hsb);
+        partialHSB<16>(value, hsb);
+        partialHSB<8>(value, hsb);
+        partialHSB<4>(value, hsb);
+        partialHSB<2>(value, hsb);
+        partialHSB<1>(value, hsb);
+        return hsb;
+    }
+
+    // This function takes a value and a bit width, where value obeys the following constraints:
+    //   * bits outside of the width of the value must be zero.
+    //   * bits within the width of value must be neither all clear nor all set.
+    // The input is inspected to detect values that consist of either two or three contiguous
+    // ranges of bits. The output range hsb..lsb will describe the second range of the value.
+    // If the range is set, inverted will be false, and if the range is clear, inverted will
+    // be true. For example (with width 8):
+    //   00001111 = hsb:3, lsb:0, inverted:false
+    //   11110000 = hsb:3, lsb:0, inverted:true
+    //   00111100 = hsb:5, lsb:2, inverted:false
+    //   11000011 = hsb:5, lsb:2, inverted:true
+    template<unsigned width>
+    static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
+    {
+        ASSERT(value & mask(width - 1));
+        ASSERT(value != mask(width - 1));
+        ASSERT(!(value & ~mask(width - 1)));
+
+        // Detect cases where the top bit is set; if so, flip all the bits & set invert.
+        // This halves the number of patterns we need to look for.
+        const uint64_t msb = 1ull << (width - 1);
+        if ((inverted = (value & msb)))
+            value ^= mask(width - 1);
+
+        // Find the highest set bit in value, generate a corresponding mask & flip all
+        // bits under it.
+        hsb = highestSetBit(value);
+        value ^= mask(hsb);
+        if (!value) {
+            // If this cleared the value, then the range hsb..0 was all set.
+            lsb = 0;
+            return true;
+        }
+
+        // Try making one more mask, and flipping the bits!
+        lsb = highestSetBit(value);
+        value ^= mask(lsb);
+        if (!value) {
+            // Success - but lsb actually points to the hsb of a third range - add one
+            // to get to the lsb of the mid range.
+            ++lsb;
+            return true;
+        }
+
+        return false;
+    }
+
+    // Encodes the set of immN:immr:imms fields found in a logical immediate.
+    template<unsigned width>
+    static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
+    {
+        // Check width is a power of 2!
+        ASSERT(!(width & (width -1)));
+        ASSERT(width <= 64 && width >= 2);
+        ASSERT(hsb >= lsb);
+        ASSERT(hsb < width);
+
+        int immN = 0;
+        int imms = 0;
+        int immr = 0;
+
+        // For 64-bit values this is easy - just set immN to true, and imms just
+        // contains the bit number of the highest set bit of the set range. For
+        // values with narrower widths, these are encoded by a leading set of
+        // one bits, followed by a zero bit, followed by the remaining set of bits
+        // being the high bit of the range. For a 32-bit immediate there are no
+        // leading one bits, just a zero followed by a five bit number. For a
+        // 16-bit immediate there is one one bit, a zero bit, and then a four bit
+        // bit-position, etc.
+        if (width == 64)
+            immN = 1;
+        else
+            imms = 63 & ~(width + width - 1);
+
+        if (inverted) {
+            // if width is 64 & hsb is 62, then we have a value something like:
+            //   0x80000000ffffffff (in this case with lsb 32).
+            // The ror should be by 1, imms (effectively set width minus 1) is
+            // 32. Set width is full width minus cleared width.
+            immr = (width - 1) - hsb;
+            imms |= (width - ((hsb - lsb) + 1)) - 1;
+        } else {
+            // if width is 64 & hsb is 62, then we have a value something like:
+            //   0x7fffffff00000000 (in this case with lsb 32).
+            // The value is effectively rol'ed by lsb, which is equivalent to
+            // a ror by width - lsb (or 0, in the case where lsb is 0). imms
+            // is hsb - lsb.
+            immr = (width - lsb) & (width - 1);
+            imms |= hsb - lsb;
+        }
+
+        return immN << 12 | immr << 6 | imms;
+    }
+
+    static const int InvalidLogicalImmediate = -1;
+
+    int m_value;
+};
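+
+// Illustrative worked example (not part of the original patch): encoding 0x0000ff00 as a
+// 32-bit logical immediate.
+//
+//   ARM64LogicalImmediate imm = ARM64LogicalImmediate::create32(0x0000ff00);
+//   // findBitRange<32> finds the single set range hsb = 15, lsb = 8, inverted = false;
+//   // encodeLogicalImmediate<32> packs that as immN = 0, immr = 24, imms = 7, so
+//   // imm.isValid() is true, imm.is64bit() is false and imm.value() == 0x607.
+//   // 0 and ~0 remain unencodable: create32(0) yields an invalid immediate.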
+
+} // namespace JSC.
diff --git a/assembler/CPU.h b/assembler/CPU.h
new file mode 100644
index 0000000..2d2b486
--- /dev/null
+++ b/assembler/CPU.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Options.h"
+
+namespace JSC {
+
+inline bool isARMv7IDIVSupported()
+{
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86_64()
+{
+#if CPU(X86_64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool optimizeForARMv7IDIVSupported()
+{
+    return isARMv7IDIVSupported() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForARM64()
+{
+    return isARM64() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForX86()
+{
+    return isX86() && Options::useArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForX86_64()
+{
+    return isX86_64() && Options::useArchitectureSpecificOptimizations();
+}
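+
+// Illustrative usage sketch (emitARM64FastPath and emitGenericPath are hypothetical
+// helpers, not part of JSC): the optimizeFor* predicates gate architecture-specific
+// tuning on both the compile-time CPU and the runtime option.
+//
+//   if (optimizeForARM64())
+//       emitARM64FastPath();
+//   else
+//       emitGenericPath();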
+
+} // namespace JSC
+
diff --git a/assembler/CodeLocation.h b/assembler/CodeLocation.h
new file mode 100644
index 0000000..a115ec3
--- /dev/null
+++ b/assembler/CodeLocation.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+enum NearCallMode { Regular, Tail };
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabelCompact;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+class CodeLocationConvertibleLoad;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name).  These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know.  When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart.  To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+    CodeLocationInstruction instructionAtOffset(int offset);
+    CodeLocationLabel labelAtOffset(int offset);
+    CodeLocationJump jumpAtOffset(int offset);
+    CodeLocationCall callAtOffset(int offset);
+    CodeLocationNearCall nearCallAtOffset(int offset, NearCallMode);
+    CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+    CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+    CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
+    CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset);
+
+protected:
+    CodeLocationCommon()
+    {
+    }
+
+    CodeLocationCommon(MacroAssemblerCodePtr location)
+        : MacroAssemblerCodePtr(location)
+    {
+    }
+};
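+
+// Illustrative sketch (not part of the original patch): given a CodeLocationCall recorded
+// at link time, a patching site a known, fixed distance away can be re-discovered later
+// without storing a second pointer. The 4-byte offset below is purely an assumption.
+//
+//   CodeLocationCall call = ...; // recorded when the code was linked
+//   CodeLocationDataLabel32 constant = call.dataLabel32AtOffset(4);
+//   // `constant` now points into the generated code and can be handed to the repatcher.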
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+    CodeLocationInstruction() {}
+    explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationInstruction(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+    CodeLocationLabel() {}
+    explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationLabel(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+    CodeLocationJump() {}
+    explicit CodeLocationJump(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationJump(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+    CodeLocationCall() {}
+    explicit CodeLocationCall(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationCall(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+    CodeLocationNearCall() {}
+    explicit CodeLocationNearCall(MacroAssemblerCodePtr location, NearCallMode callMode)
+        : CodeLocationCommon(location), m_callMode(callMode) { }
+    explicit CodeLocationNearCall(void* location, NearCallMode callMode)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)), m_callMode(callMode) { }
+    NearCallMode callMode() { return m_callMode; }
+private:
+    NearCallMode m_callMode = NearCallMode::Regular;
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+    CodeLocationDataLabel32() {}
+    explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationDataLabel32(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelCompact : public CodeLocationCommon {
+public:
+    CodeLocationDataLabelCompact() { }
+    explicit CodeLocationDataLabelCompact(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) { }
+    explicit CodeLocationDataLabelCompact(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+    CodeLocationDataLabelPtr() {}
+    explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationDataLabelPtr(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationConvertibleLoad : public CodeLocationCommon {
+public:
+    CodeLocationConvertibleLoad() { }
+    explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) { }
+    explicit CodeLocationConvertibleLoad(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset, NearCallMode callMode)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset, callMode);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationDataLabelCompact(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationConvertibleLoad(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/DisallowMacroScratchRegisterUsage.h b/assembler/DisallowMacroScratchRegisterUsage.h
new file mode 100644
index 0000000..91f0389
--- /dev/null
+++ b/assembler/DisallowMacroScratchRegisterUsage.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+class DisallowMacroScratchRegisterUsage {
+public:
+    DisallowMacroScratchRegisterUsage(MacroAssembler& masm)
+        : m_masm(masm)
+        , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister)
+    {
+        masm.m_allowScratchRegister = false;
+    }
+
+    ~DisallowMacroScratchRegisterUsage()
+    {
+        m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister;
+    }
+
+private:
+    MacroAssembler& m_masm;
+    bool m_oldValueOfAllowScratchRegister;
+};
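+
+// Usage sketch (illustrative; `jit` is an assumed MacroAssembler&): while the RAII guard
+// is alive, code generation that would silently burn the scratch register asserts instead.
+//
+//   {
+//       DisallowMacroScratchRegisterUsage disallowScratch(jit);
+//       // ... emit code that is required not to clobber the scratch register ...
+//   } // scratch register use is re-enabled here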
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/LinkBuffer.cpp b/assembler/LinkBuffer.cpp
new file mode 100644
index 0000000..9010967
--- /dev/null
+++ b/assembler/LinkBuffer.cpp
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "LinkBuffer.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "CodeBlock.h"
+#include "JITCode.h"
+#include "JSCInlines.h"
+#include "Options.h"
+#include "VM.h"
+#include <wtf/CompilationThread.h>
+
+namespace JSC {
+
+bool shouldDumpDisassemblyFor(CodeBlock* codeBlock)
+{
+    if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly())
+        return true;
+    return Options::dumpDisassembly();
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
+{
+    performFinalization();
+    
+    ASSERT(m_didAllocate);
+    if (m_executableMemory)
+        return CodeRef(m_executableMemory);
+    
+    return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code));
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
+{
+    CodeRef result = finalizeCodeWithoutDisassembly();
+
+    if (m_alreadyDisassembled)
+        return result;
+    
+    StringPrintStream out;
+    out.printf("Generated JIT code for ");
+    va_list argList;
+    va_start(argList, format);
+    out.vprintf(format, argList);
+    va_end(argList);
+    out.printf(":\n");
+
+    out.printf("    Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+    
+    CString header = out.toCString();
+    
+    if (Options::asyncDisassembly()) {
+        disassembleAsynchronously(header, result, m_size, "    ");
+        return result;
+    }
+    
+    dataLog(header);
+    disassemble(result.code(), m_size, "    ", WTF::dataFile());
+    
+    return result;
+}
+
+#if ENABLE(BRANCH_COMPACTION)
+static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset)
+{
+    int32_t ptr = regionStart / sizeof(int32_t);
+    const int32_t end = regionEnd / sizeof(int32_t);
+    int32_t* offsets = reinterpret_cast_ptr<int32_t*>(assemblerData.buffer());
+    while (ptr < end)
+        offsets[ptr++] = offset;
+}
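+
+// Note (illustrative, not part of the original patch): the table written by
+// recordLinkOffsets maps each 32-bit slot of the pre-compaction instruction stream to the
+// number of bytes branch compaction has removed before it. For example, if the only
+// compactable jump started at original offset 0x20 and shrank by 4 bytes, original offsets
+// after it map to 4, so LinkBuffer::applyOffset() turns an original label offset of 0x28
+// into 0x24, its real position in the compacted code.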
+
+template <typename InstructionType>
+void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+{
+    allocate(macroAssembler, ownerUID, effort);
+    const size_t initialSize = macroAssembler.m_assembler.codeSize();
+    if (didFailToAllocate())
+        return;
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
+    m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData();
+    uint8_t* inData = reinterpret_cast(m_assemblerStorage.buffer());
+
+    AssemblerData outBuffer(m_size);
+
+    uint8_t* outData = reinterpret_cast(outBuffer.buffer());
+    uint8_t* codeOutData = reinterpret_cast(m_code);
+
+    int readPtr = 0;
+    int writePtr = 0;
+    unsigned jumpCount = jumpsToLink.size();
+    if (m_shouldPerformBranchCompaction) {
+        for (unsigned i = 0; i < jumpCount; ++i) {
+            int offset = readPtr - writePtr;
+            ASSERT(!(offset & 1));
+                
+            // Copy the instructions from the last jump to the current one.
+            size_t regionSize = jumpsToLink[i].from() - readPtr;
+            InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
+            InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
+            InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
+            ASSERT(!(regionSize % 2));
+            ASSERT(!(readPtr % 2));
+            ASSERT(!(writePtr % 2));
+            while (copySource != copyEnd)
+                *copyDst++ = *copySource++;
+            recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset);
+            readPtr += regionSize;
+            writePtr += regionSize;
+                
+            // Calculate absolute address of the jump target, in the case of backwards
+            // branches we need to be precise, forward branches we are pessimistic
+            const uint8_t* target;
+            if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+                target = codeOutData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+            else
+                target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+                
+            JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], codeOutData + writePtr, target);
+            // Compact branch if we can...
+            if (MacroAssembler::canCompact(jumpsToLink[i].type())) {
+                // Step back in the write stream
+                int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+                if (delta) {
+                    writePtr -= delta;
+                    recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+                }
+            }
+            jumpsToLink[i].setFrom(writePtr);
+        }
+    } else {
+        if (!ASSERT_DISABLED) {
+            for (unsigned i = 0; i < jumpCount; ++i)
+                ASSERT(!MacroAssembler::canCompact(jumpsToLink[i].type()));
+        }
+    }
+    // Copy everything after the last jump
+    memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr);
+    recordLinkOffsets(m_assemblerStorage, readPtr, initialSize, readPtr - writePtr);
+        
+    for (unsigned i = 0; i < jumpCount; ++i) {
+        uint8_t* location = codeOutData + jumpsToLink[i].from();
+        uint8_t* target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
+        MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target);
+    }
+
+    jumpsToLink.clear();
+
+    size_t compactSize = writePtr + initialSize - readPtr;
+    if (m_executableMemory) {
+        m_size = compactSize;
+        m_executableMemory->shrink(m_size);
+    } else {
+        size_t nopSizeInBytes = initialSize - compactSize;
+        bool isCopyingToExecutableMemory = false;
+        MacroAssembler::AssemblerType_T::fillNops(outData + compactSize, nopSizeInBytes, isCopyingToExecutableMemory);
+    }
+
+    performJITMemcpy(m_code, outData, m_size);
+
+#if DUMP_LINK_STATISTICS
+    dumpLinkStatistics(m_code, initialSize, m_size);
+#endif
+#if DUMP_CODE
+    dumpCode(m_code, m_size);
+#endif
+}
+#endif
+
+
+void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+{
+#if !ENABLE(BRANCH_COMPACTION)
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+    macroAssembler.m_assembler.buffer().flushConstantPool(false);
+#endif
+    allocate(macroAssembler, ownerUID, effort);
+    if (!m_didAllocate)
+        return;
+    ASSERT(m_code);
+    AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
+#if CPU(ARM_TRADITIONAL)
+    macroAssembler.m_assembler.prepareExecutableCopy(m_code);
+#endif
+    performJITMemcpy(m_code, buffer.data(), buffer.codeSize());
+#if CPU(MIPS)
+    macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
+#endif
+#elif CPU(ARM_THUMB2)
+    copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
+#elif CPU(ARM64)
+    copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort);
+#endif // !ENABLE(BRANCH_COMPACTION)
+
+    m_linkTasks = WTFMove(macroAssembler.m_linkTasks);
+}
+
+void LinkBuffer::allocate(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
+{
+    size_t initialSize = macroAssembler.m_assembler.codeSize();
+    if (m_code) {
+        if (initialSize > m_size)
+            return;
+        
+        size_t nopsToFillInBytes = m_size - initialSize;
+        macroAssembler.emitNops(nopsToFillInBytes);
+        m_didAllocate = true;
+        return;
+    }
+    
+    ASSERT(m_vm != nullptr);
+    m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort);
+    if (!m_executableMemory)
+        return;
+    m_code = m_executableMemory->start();
+    m_size = initialSize;
+    m_didAllocate = true;
+}
+
+void LinkBuffer::performFinalization()
+{
+    for (auto& task : m_linkTasks)
+        task->run(*this);
+
+#ifndef NDEBUG
+    ASSERT(!isCompilationThread());
+    ASSERT(!m_completed);
+    ASSERT(isValid());
+    m_completed = true;
+#endif
+    
+    MacroAssembler::cacheFlush(code(), m_size);
+}
+
+#if DUMP_LINK_STATISTICS
+void LinkBuffer::dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
+{
+    static unsigned linkCount = 0;
+    static unsigned totalInitialSize = 0;
+    static unsigned totalFinalSize = 0;
+    linkCount++;
+    totalInitialSize += initialSize;
+    totalFinalSize += finalSize;
+    dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n", 
+            code, static_cast(initialSize), static_cast(finalSize),
+            static_cast(initialSize - finalSize),
+            100.0 * (initialSize - finalSize) / initialSize);
+    dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n", 
+            linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
+            100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
+}
+#endif
+
+#if DUMP_CODE
+void LinkBuffer::dumpCode(void* code, size_t size)
+{
+#if CPU(ARM_THUMB2)
+    // Dump the generated code in an asm file format that can be assembled and then disassembled
+    // for debugging purposes. For example, save this output as jit.s:
+    //   gcc -arch armv7 -c jit.s
+    //   otool -tv jit.o
+    static unsigned codeCount = 0;
+    unsigned short* tcode = static_cast<unsigned short*>(code);
+    size_t tsize = size / sizeof(short);
+    char nameBuf[128];
+    snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+    dataLogF("\t.syntax unified\n"
+            "\t.section\t__TEXT,__text,regular,pure_instructions\n"
+            "\t.globl\t%s\n"
+            "\t.align 2\n"
+            "\t.code 16\n"
+            "\t.thumb_func\t%s\n"
+            "# %p\n"
+            "%s:\n", nameBuf, nameBuf, code, nameBuf);
+        
+    for (unsigned i = 0; i < tsize; i++)
+        dataLogF("\t.short\t0x%x\n", tcode[i]);
+#elif CPU(ARM_TRADITIONAL)
+    //   gcc -c jit.s
+    //   objdump -D jit.o
+    static unsigned codeCount = 0;
+    unsigned int* tcode = static_cast<unsigned int*>(code);
+    size_t tsize = size / sizeof(unsigned int);
+    char nameBuf[128];
+    snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+    dataLogF("\t.globl\t%s\n"
+            "\t.align 4\n"
+            "\t.code 32\n"
+            "\t.text\n"
+            "# %p\n"
+            "%s:\n", nameBuf, code, nameBuf);
+
+    for (unsigned i = 0; i < tsize; i++)
+        dataLogF("\t.long\t0x%x\n", tcode[i]);
+#endif
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+
diff --git a/assembler/LinkBuffer.h b/assembler/LinkBuffer.h
new file mode 100644
index 0000000..efb26f9
--- /dev/null
+++ b/assembler/LinkBuffer.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#define DUMP_LINK_STATISTICS 0
+#define DUMP_CODE 0
+
+#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
+#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
+#define CSS_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-3))
+
+#include "JITCompilationEffort.h"
+#include "MacroAssembler.h"
+#include <wtf/DataLog.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class CodeBlock;
+class VM;
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to is final location in memory.  At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+//   * Jump objects may be linked to external targets,
+//   * The address of Jump objects may be taken, such that it can later be relinked.
+//   * The return address of a Call may be acquired.
+//   * The address of a Label pointing into the code may be resolved.
+//   * The value referenced by a DataLabel may be set.
+//
+class LinkBuffer {
+    WTF_MAKE_NONCOPYABLE(LinkBuffer); WTF_MAKE_FAST_ALLOCATED;
+    
+    typedef MacroAssemblerCodeRef CodeRef;
+    typedef MacroAssemblerCodePtr CodePtr;
+    typedef MacroAssembler::Label Label;
+    typedef MacroAssembler::Jump Jump;
+    typedef MacroAssembler::PatchableJump PatchableJump;
+    typedef MacroAssembler::JumpList JumpList;
+    typedef MacroAssembler::Call Call;
+    typedef MacroAssembler::DataLabelCompact DataLabelCompact;
+    typedef MacroAssembler::DataLabel32 DataLabel32;
+    typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+    typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
+#if ENABLE(BRANCH_COMPACTION)
+    typedef MacroAssembler::LinkRecord LinkRecord;
+    typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
+
+public:
+    LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+        : m_size(0)
+        , m_didAllocate(false)
+        , m_code(0)
+        , m_vm(&vm)
+#ifndef NDEBUG
+        , m_completed(false)
+#endif
+    {
+        linkCode(macroAssembler, ownerUID, effort);
+    }
+
+    LinkBuffer(MacroAssembler& macroAssembler, void* code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true)
+        : m_size(size)
+        , m_didAllocate(false)
+        , m_code(code)
+        , m_vm(0)
+#ifndef NDEBUG
+        , m_completed(false)
+#endif
+    {
+#if ENABLE(BRANCH_COMPACTION)
+        m_shouldPerformBranchCompaction = shouldPerformBranchCompaction;
+#else
+        UNUSED_PARAM(shouldPerformBranchCompaction);
+#endif
+        linkCode(macroAssembler, 0, effort);
+    }
+
+    ~LinkBuffer()
+    {
+    }
+    
+    bool didFailToAllocate() const
+    {
+        return !m_didAllocate;
+    }
+
+    bool isValid() const
+    {
+        return !didFailToAllocate();
+    }
+    
+    // These methods are used to link or set values at code generation time.
+
+    void link(Call call, FunctionPtr function)
+    {
+        ASSERT(call.isFlagSet(Call::Linkable));
+        call.m_label = applyOffset(call.m_label);
+        MacroAssembler::linkCall(code(), call, function);
+    }
+    
+    void link(Call call, CodeLocationLabel label)
+    {
+        link(call, FunctionPtr(label.executableAddress()));
+    }
+    
+    void link(Jump jump, CodeLocationLabel label)
+    {
+        jump.m_label = applyOffset(jump.m_label);
+        MacroAssembler::linkJump(code(), jump, label);
+    }
+
+    void link(const JumpList& list, CodeLocationLabel label)
+    {
+        for (const Jump& jump : list.jumps())
+            link(jump, label);
+    }
+
+    void patch(DataLabelPtr label, void* value)
+    {
+        AssemblerLabel target = applyOffset(label.m_label);
+        MacroAssembler::linkPointer(code(), target, value);
+    }
+
+    void patch(DataLabelPtr label, CodeLocationLabel value)
+    {
+        AssemblerLabel target = applyOffset(label.m_label);
+        MacroAssembler::linkPointer(code(), target, value.executableAddress());
+    }
+
+    // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+    
+    CodeLocationLabel entrypoint()
+    {
+        return CodeLocationLabel(code());
+    }
+
+    CodeLocationCall locationOf(Call call)
+    {
+        ASSERT(call.isFlagSet(Call::Linkable));
+        ASSERT(!call.isFlagSet(Call::Near));
+        return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+    }
+
+    CodeLocationNearCall locationOfNearCall(Call call)
+    {
+        ASSERT(call.isFlagSet(Call::Linkable));
+        ASSERT(call.isFlagSet(Call::Near));
+        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)),
+            call.isFlagSet(Call::Tail) ? NearCallMode::Tail : NearCallMode::Regular);
+    }
+
+    CodeLocationLabel locationOf(PatchableJump jump)
+    {
+        return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
+    }
+
+    CodeLocationLabel locationOf(Label label)
+    {
+        return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+    }
+
+    CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+    {
+        return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+    }
+
+    CodeLocationDataLabel32 locationOf(DataLabel32 label)
+    {
+        return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+    }
+    
+    CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
+    {
+        return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+    }
+
+    CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label)
+    {
+        return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+    }
+
+    // This method obtains the return address of the call, given as an offset from
+    // the start of the code.
+    unsigned returnAddressOffset(Call call)
+    {
+        call.m_label = applyOffset(call.m_label);
+        return MacroAssembler::getLinkerCallReturnOffset(call);
+    }
+
+    uint32_t offsetOf(Label label)
+    {
+        return applyOffset(label.m_label).m_offset;
+    }
+
+    unsigned offsetOf(PatchableJump jump)
+    {
+        return applyOffset(jump.m_jump.m_label).m_offset;
+    }
+
+    // Upon completion of all patching 'FINALIZE_CODE()' should be called once to
+    // complete generation of the code. Alternatively, call
+    // finalizeCodeWithoutDisassembly() directly if you have your own way of
+    // displaying disassembly.
+    
+    JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassembly();
+    JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+
+    CodePtr trampolineAt(Label label)
+    {
+        return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+    }
+
+    void* debugAddress()
+    {
+        return m_code;
+    }
+
+    size_t size() const { return m_size; }
+    
+    bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; }
+    void didAlreadyDisassemble() { m_alreadyDisassembled = true; }
+
+    VM& vm() { return *m_vm; }
+
+private:
+#if ENABLE(BRANCH_COMPACTION)
+    int executableOffsetFor(int location)
+    {
+        if (!location)
+            return 0;
+        return bitwise_cast<int32_t*>(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1];
+    }
+#endif
+    
+    template <typename T> T applyOffset(T src)
+    {
+#if ENABLE(BRANCH_COMPACTION)
+        src.m_offset -= executableOffsetFor(src.m_offset);
+#endif
+        return src;
+    }
+
+    // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
+    void* code()
+    {
+        return m_code;
+    }
+    
+    void allocate(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+
+    JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+#if ENABLE(BRANCH_COMPACTION)
+    template <typename InstructionType>
+    void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort);
+#endif
+
+    void performFinalization();
+
+#if DUMP_LINK_STATISTICS
+    static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize);
+#endif
+    
+#if DUMP_CODE
+    static void dumpCode(void* code, size_t);
+#endif
+    
+    RefPtr<ExecutableMemoryHandle> m_executableMemory;
+    size_t m_size;
+#if ENABLE(BRANCH_COMPACTION)
+    AssemblerData m_assemblerStorage;
+    bool m_shouldPerformBranchCompaction { true };
+#endif
+    bool m_didAllocate;
+    void* m_code;
+    VM* m_vm;
+#ifndef NDEBUG
+    bool m_completed;
+#endif
+    bool m_alreadyDisassembled { false };
+    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;
+};
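+
+// Typical flow, as an illustrative sketch only (vm, ownerUID and slowPathTarget are
+// assumed to exist in the surrounding JIT; error handling is elided):
+//
+//   MacroAssembler jit;
+//   MacroAssembler::Jump toSlowPath = jit.jump();
+//   // ... emit the rest of the code ...
+//   LinkBuffer linkBuffer(vm, jit, ownerUID, JITCompilationCanFail);
+//   if (linkBuffer.didFailToAllocate())
+//       return; // fall back to a non-JIT path
+//   linkBuffer.link(toSlowPath, CodeLocationLabel(slowPathTarget));
+//   MacroAssemblerCodeRef code = FINALIZE_CODE(linkBuffer, ("illustrative thunk"));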
+
+#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading)  \
+    (UNLIKELY((condition))                                              \
+     ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
+     : (linkBufferReference).finalizeCodeWithoutDisassembly())
+
+bool shouldDumpDisassemblyFor(CodeBlock*);
+
+#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading)  \
+    FINALIZE_CODE_IF(shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+// Use this to finalize code, like so:
+//
+// CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
+//
+// Which, in disassembly mode, will print:
+//
+// Generated JIT code for my super thingy number 42:
+//     Code at [0x123456, 0x234567]:
+//         0x123456: mov $0, 0
+//         0x12345a: ret
+//
+// ... and so on.
+//
+// Note that the dataLogFArgumentsForHeading are only evaluated when dumpDisassembly
+// is true, so you can hide expensive disassembly-only computations inside there.
+
+#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading)  \
+    FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading)  \
+    FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MIPSAssembler.h b/assembler/MIPSAssembler.h
new file mode 100644
index 0000000..b1c4232
--- /dev/null
+++ b/assembler/MIPSAssembler.h
@@ -0,0 +1,1097 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <limits.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+typedef uint32_t MIPSWord;
+
+namespace MIPSRegisters {
+typedef enum {
+    r0 = 0,
+    r1,
+    r2,
+    r3,
+    r4,
+    r5,
+    r6,
+    r7,
+    r8,
+    r9,
+    r10,
+    r11,
+    r12,
+    r13,
+    r14,
+    r15,
+    r16,
+    r17,
+    r18,
+    r19,
+    r20,
+    r21,
+    r22,
+    r23,
+    r24,
+    r25,
+    r26,
+    r27,
+    r28,
+    r29,
+    r30,
+    r31,
+    zero = r0,
+    at = r1,
+    v0 = r2,
+    v1 = r3,
+    a0 = r4,
+    a1 = r5,
+    a2 = r6,
+    a3 = r7,
+    t0 = r8,
+    t1 = r9,
+    t2 = r10,
+    t3 = r11,
+    t4 = r12,
+    t5 = r13,
+    t6 = r14,
+    t7 = r15,
+    s0 = r16,
+    s1 = r17,
+    s2 = r18,
+    s3 = r19,
+    s4 = r20,
+    s5 = r21,
+    s6 = r22,
+    s7 = r23,
+    t8 = r24,
+    t9 = r25,
+    k0 = r26,
+    k1 = r27,
+    gp = r28,
+    sp = r29,
+    fp = r30,
+    ra = r31
+} RegisterID;
+
+typedef enum {
+    f0,
+    f1,
+    f2,
+    f3,
+    f4,
+    f5,
+    f6,
+    f7,
+    f8,
+    f9,
+    f10,
+    f11,
+    f12,
+    f13,
+    f14,
+    f15,
+    f16,
+    f17,
+    f18,
+    f19,
+    f20,
+    f21,
+    f22,
+    f23,
+    f24,
+    f25,
+    f26,
+    f27,
+    f28,
+    f29,
+    f30,
+    f31
+} FPRegisterID;
+
+} // namespace MIPSRegisters
+
+class MIPSAssembler {
+public:
+    typedef MIPSRegisters::RegisterID RegisterID;
+    typedef MIPSRegisters::FPRegisterID FPRegisterID;
+    typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+    static constexpr RegisterID firstRegister() { return MIPSRegisters::r0; }
+    static constexpr RegisterID lastRegister() { return MIPSRegisters::r31; }
+
+    static constexpr FPRegisterID firstFPRegister() { return MIPSRegisters::f0; }
+    static constexpr FPRegisterID lastFPRegister() { return MIPSRegisters::f31; }
+
+    MIPSAssembler()
+        : m_indexOfLastWatchpoint(INT_MIN)
+        , m_indexOfTailOfLastWatchpoint(INT_MIN)
+    {
+    }
+
+    AssemblerBuffer& buffer() { return m_buffer; }
+
+    // MIPS instruction opcode field position
+    enum {
+        OP_SH_RD = 11,
+        OP_SH_RT = 16,
+        OP_SH_RS = 21,
+        OP_SH_SHAMT = 6,
+        OP_SH_CODE = 16,
+        OP_SH_FD = 6,
+        OP_SH_FS = 11,
+        OP_SH_FT = 16
+    };
+
+    void emitInst(MIPSWord op)
+    {
+        void* oldBase = m_buffer.data();
+
+        m_buffer.putInt(op);
+
+        void* newBase = m_buffer.data();
+        if (oldBase != newBase)
+            relocateJumps(oldBase, newBase);
+    }
+
+    void nop()
+    {
+        emitInst(0x00000000);
+    }
+
+    void sync()
+    {
+        emitInst(0x0000000f);
+    }
+
+    /* Need to insert one load data delay nop for mips1.  */
+    void loadDelayNop()
+    {
+#if WTF_MIPS_ISA(1)
+        nop();
+#endif
+    }
+
+    /* Need to insert one coprocessor access delay nop for mips1.  */
+    void copDelayNop()
+    {
+#if WTF_MIPS_ISA(1)
+        nop();
+#endif
+    }
+
+    void move(RegisterID rd, RegisterID rs)
+    {
+        /* addu */
+        emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS));
+    }
+
+    /* Set an immediate value to a register.  This may generate 1 or 2
+       instructions.  */
+    void li(RegisterID dest, int imm)
+    {
+        if (imm >= -32768 && imm <= 32767)
+            addiu(dest, MIPSRegisters::zero, imm);
+        else if (imm >= 0 && imm < 65536)
+            ori(dest, MIPSRegisters::zero, imm);
+        else {
+            lui(dest, imm >> 16);
+            if (imm & 0xffff)
+                ori(dest, dest, imm);
+        }
+    }
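+
+    // For example (illustrative): li(t0, 42) emits a single addiu t0, zero, 42, while
+    // li(t0, 0x12345678) falls into the general case and emits lui t0, 0x1234 followed
+    // by ori t0, t0, 0x5678.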
+
+    void lui(RegisterID rt, int imm)
+    {
+        emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
+    }
+
+    void clz(RegisterID rd, RegisterID rs)
+    {
+        emitInst(0x70000020 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rd << OP_SH_RT));
+    }
+
+    void addiu(RegisterID rt, RegisterID rs, int imm)
+    {
+        emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void addu(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void subu(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void mult(RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void div(RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x0000001a | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void mfhi(RegisterID rd)
+    {
+        emitInst(0x00000010 | (rd << OP_SH_RD));
+    }
+
+    void mflo(RegisterID rd)
+    {
+        emitInst(0x00000012 | (rd << OP_SH_RD));
+    }
+
+    void mul(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+#if WTF_MIPS_ISA_AT_LEAST(32) 
+        emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+#else
+        mult(rs, rt);
+        mflo(rd);
+#endif
+    }
+
+    void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void andi(RegisterID rt, RegisterID rs, int imm)
+    {
+        emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void nor(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void ori(RegisterID rt, RegisterID rs, int imm)
+    {
+        emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void xori(RegisterID rt, RegisterID rs, int imm)
+    {
+        emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void slt(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
+    {
+        emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+    }
+
+    void sltiu(RegisterID rt, RegisterID rs, int imm)
+    {
+        emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void sll(RegisterID rd, RegisterID rt, int shamt)
+    {
+        emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+    }
+
+    void sllv(RegisterID rd, RegisterID rt, RegisterID rs)
+    {
+        emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+    }
+
+    void sra(RegisterID rd, RegisterID rt, int shamt)
+    {
+        emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+    }
+
+    void srav(RegisterID rd, RegisterID rt, RegisterID rs)
+    {
+        emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+    }
+
+    void srl(RegisterID rd, RegisterID rt, int shamt)
+    {
+        emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+    }
+
+    void srlv(RegisterID rd, RegisterID rt, RegisterID rs)
+    {
+        emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+    }
+
+    void lb(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void lbu(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void lw(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void lwl(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void lwr(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void lh(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void lhu(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        loadDelayNop();
+    }
+
+    void sb(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+    }
+
+    void sh(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+    }
+
+    void sw(RegisterID rt, RegisterID rs, int offset)
+    {
+        emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+    }
+
+    void jr(RegisterID rs)
+    {
+        emitInst(0x00000008 | (rs << OP_SH_RS));
+    }
+
+    void jalr(RegisterID rs)
+    {
+        emitInst(0x0000f809 | (rs << OP_SH_RS));
+    }
+
+    void jal()
+    {
+        emitInst(0x0c000000);
+    }
+
+    void bkpt()
+    {
+        int value = 512; /* BRK_BUG */
+        emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE));
+    }
+
+    void bgez(RegisterID rs, int imm)
+    {
+        emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void bltz(RegisterID rs, int imm)
+    {
+        emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff));
+    }
+
+    void beq(RegisterID rs, RegisterID rt, int imm)
+    {
+        emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+    }
+
+    void bne(RegisterID rs, RegisterID rt, int imm)
+    {
+        emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+    }
+
+    void bc1t()
+    {
+        emitInst(0x45010000);
+    }
+
+    void bc1f()
+    {
+        emitInst(0x45000000);
+    }
+
+    void appendJump()
+    {
+        m_jumps.append(m_buffer.label());
+    }
+
+    void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+    }
+
+    void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+    }
+
+    void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+    }
+
+    void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+    }
+
+    void lwc1(FPRegisterID ft, RegisterID rs, int offset)
+    {
+        emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+        copDelayNop();
+    }
+
+    void ldc1(FPRegisterID ft, RegisterID rs, int offset)
+    {
+        emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+    }
+
+    void swc1(FPRegisterID ft, RegisterID rs, int offset)
+    {
+        emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+    }
+
+    void sdc1(FPRegisterID ft, RegisterID rs, int offset)
+    {
+        emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+    }
+
+    void mtc1(RegisterID rt, FPRegisterID fs)
+    {
+        emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+        copDelayNop();
+    }
+
+    void mthc1(RegisterID rt, FPRegisterID fs)
+    {
+        emitInst(0x44e00000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+        copDelayNop();
+    }
+
+    void mfc1(RegisterID rt, FPRegisterID fs)
+    {
+        emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+        copDelayNop();
+    }
+
+    void sqrtd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void absd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200005 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void movd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void negd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200007 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void truncwd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void cvtdw(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void cvtds(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void cvtwd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void cvtsd(FPRegisterID fd, FPRegisterID fs)
+    {
+        emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+    }
+
+    void ceqd(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void cngtd(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void cnged(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void cltd(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void cled(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void cueqd(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void coled(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void coltd(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void culed(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    void cultd(FPRegisterID fs, FPRegisterID ft)
+    {
+        emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+        copDelayNop();
+    }
+
+    // General helpers
+
+    AssemblerLabel labelIgnoringWatchpoints()
+    {
+        return m_buffer.label();
+    }
+
+    AssemblerLabel labelForWatchpoint()
+    {
+        AssemblerLabel result = m_buffer.label();
+        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+            result = label();
+        m_indexOfLastWatchpoint = result.m_offset;
+        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+        return result;
+    }
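+    // label() below is the watchpoint-aware variant: it nop-pads the buffer until
+    // the label falls past m_indexOfTailOfLastWatchpoint, so subsequent code can
+    // never start inside the maxJumpReplacementSize() window reserved by
+    // labelForWatchpoint() for a later jump replacement.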
+
+    AssemblerLabel label()
+    {
+        AssemblerLabel result = m_buffer.label();
+        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+            nop();
+            result = m_buffer.label();
+        }
+        return result;
+    }
+
+    AssemblerLabel align(int alignment)
+    {
+        while (!m_buffer.isAligned(alignment))
+            bkpt();
+
+        return label();
+    }
+
+    static void* getRelocatedAddress(void* code, AssemblerLabel label)
+    {
+        return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+    }
+
+    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+    {
+        return b.m_offset - a.m_offset;
+    }
+
+    // Assembler admin methods:
+
+    size_t codeSize() const
+    {
+        return m_buffer.codeSize();
+    }
+
+    unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+    // Assembly helpers for moving data between fp and registers.
+    void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+    {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+        mfc1(rd1, rn);
+        mfhc1(rd2, rn);
+#else
+        mfc1(rd1, rn);
+        mfc1(rd2, FPRegisterID(rn + 1));
+#endif
+    }
+
+    void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+    {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+        mtc1(rn1, rd);
+        mthc1(rn2, rd);
+#else
+        mtc1(rn1, rd);
+        mtc1(rn2, FPRegisterID(rd + 1));
+#endif
+    }
+
+    static unsigned getCallReturnOffset(AssemblerLabel call)
+    {
+        // The return address is after a call and a delay slot instruction
+        return call.m_offset;
+    }
+
+    // Linking & patching:
+    //
+    // 'link' and 'patch' methods are for use on unprotected code - such as the code
+    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+    // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+    // pool the 'repatch' and 'relink' methods should be used.
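+    //
+    // In practice that means linkJump()/linkCall() below patch words directly in
+    // the still-writable AssemblerBuffer, while relinkJump()/relinkCall() and the
+    // repatch* helpers rewrite finalized instructions and therefore finish with a
+    // cacheFlush() so the instruction fetch sees the modified words.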
+
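+    // A MIPS "j" supplies only the low 28 bits of the target; the top four bits
+    // come from the address of its delay slot. linkDirectJump() therefore uses a
+    // single j when source and target share the same 256MB region, and otherwise
+    // materializes the address in $t9 with lui/ori and jumps through jr $t9.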
+    static size_t linkDirectJump(void* code, void* to)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code));
+        size_t ops = 0;
+        int32_t slotAddr = reinterpret_cast<int32_t>(insn) + 4;
+        int32_t toAddr = reinterpret_cast<int32_t>(to);
+
+        if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) {
+            // lui
+            *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff);
+            ++insn;
+            // ori
+            *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff);
+            ++insn;
+            // jr
+            *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+            ++insn;
+            ops = 4 * sizeof(MIPSWord);
+        } else {
+            // j
+            *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2);
+            ++insn;
+            ops = 2 * sizeof(MIPSWord);
+        }
+        // nop
+        *insn = 0x00000000;
+        return ops;
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + from.m_offset);
+        MIPSWord* toPos = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + to.m_offset);
+
+        ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+        insn = insn - 6;
+        linkWithOffset(insn, toPos);
+    }
+
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+
+        ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+        insn = insn - 6;
+        linkWithOffset(insn, to);
+    }
+
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+        linkCallInternal(insn, to);
+    }
+
+    static void linkPointer(void* code, AssemblerLabel from, void* to)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+        insn++;
+        ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+        *insn = (*insn & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+    }
+
+    static void relinkJump(void* from, void* to)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+
+        ASSERT(!(*(insn - 1)) && !(*(insn - 5)));
+        insn = insn - 6;
+        int flushSize = linkWithOffset(insn, to);
+
+        cacheFlush(insn, flushSize);
+    }
+
+    static void relinkCall(void* from, void* to)
+    {
+        void* start;
+        int size = linkCallInternal(from, to);
+        if (size == sizeof(MIPSWord))
+            start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 2 * sizeof(MIPSWord));
+        else
+            start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 4 * sizeof(MIPSWord));
+
+        cacheFlush(start, size);
+    }
+
+    static void repatchInt32(void* from, int32_t to)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff);
+        insn++;
+        ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+        *insn = (*insn & 0xffff0000) | (to & 0xffff);
+        insn--;
+        cacheFlush(insn, 2 * sizeof(MIPSWord));
+    }
+
+    static int32_t readInt32(void* from)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        int32_t result = (*insn & 0x0000ffff) << 16;
+        insn++;
+        ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+        result |= *insn & 0x0000ffff;
+        return result;
+    }
+    
+    static void repatchCompact(void* where, int32_t value)
+    {
+        repatchInt32(where, value);
+    }
+
+    static void repatchPointer(void* from, void* to)
+    {
+        repatchInt32(from, reinterpret_cast<int32_t>(to));
+    }
+
+    static void* readPointer(void* from)
+    {
+        return reinterpret_cast<void*>(readInt32(from));
+    }
+
+    static void* readCallTarget(void* from)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+        insn -= 4;
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        int32_t result = (*insn & 0x0000ffff) << 16;
+        insn++;
+        ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+        result |= *insn & 0x0000ffff;
+        return reinterpret_cast<void*>(result);
+    }
+
+    static void cacheFlush(void* code, size_t size)
+    {
+        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+    }
+
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return sizeof(MIPSWord) * 4;
+    }
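+    // Four words cover the worst case emitted by linkDirectJump()/replaceWithJump():
+    // lui $t9 / ori $t9 / jr $t9 plus the delay-slot nop.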
+
+    static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm)
+    {
+        MIPSWord* insn = static_cast<MIPSWord*>(instructionStart);
+        size_t codeSize = 2 * sizeof(MIPSWord);
+
+        // lui
+        *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff);
+        ++insn;
+        // ori
+        *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff);
+        ++insn;
+        // if jr $t9
+        if (*insn == 0x03200008) {
+            *insn = 0x00000000;
+            codeSize += sizeof(MIPSWord);
+        }
+        cacheFlush(insn, codeSize);
+    }
+
+    static void replaceWithJump(void* instructionStart, void* to)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 3));
+        ASSERT(!(bitwise_cast<uintptr_t>(to) & 3));
+        size_t ops = linkDirectJump(instructionStart, to);
+        cacheFlush(instructionStart, ops);
+    }
+
+    static void replaceWithLoad(void* instructionStart)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        insn++;
+        ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+        insn++;
+        *insn = 0x8c000000 | ((*insn) & 0x3ffffff); // lw
+        cacheFlush(insn, 4);
+    }
+
+    static void replaceWithAddressComputation(void* instructionStart)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        insn++;
+        ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+        insn++;
+        *insn = 0x24000000 | ((*insn) & 0x3ffffff); // addiu
+        cacheFlush(insn, 4);
+    }
+
+    /* Update each jump in the buffer of newBase.  */
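+    /* Absolute "j" targets and lui/ori address pairs are recomputed against
+       newBase; a rebased "j" whose target would leave its 256MB region is
+       rewritten as lui/ori into $t9 followed by jr $t9.  */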
+    void relocateJumps(void* oldBase, void* newBase)
+    {
+        // Check each jump
+        for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+            int pos = iter->m_offset;
+            MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
+            insn = insn + 2;
+            // Need to make sure we have 5 valid instructions after pos
+            if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
+                continue;
+
+            if ((*insn & 0xfc000000) == 0x08000000) { // j
+                int offset = *insn & 0x03ffffff;
+                int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase;
+                int topFourBits = (oldInsnAddress + 4) >> 28;
+                int oldTargetAddress = (topFourBits << 28) | (offset << 2);
+                int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+                int newInsnAddress = (int)insn;
+                if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28))
+                    *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff);
+                else {
+                    /* lui */
+                    *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+                    /* ori */
+                    *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+                    /* jr */
+                    *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+                }
+            } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui
+                int high = (*insn & 0xffff) << 16;
+                int low = *(insn + 1) & 0xffff;
+                int oldTargetAddress = high | low;
+                int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+                /* lui */
+                *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+                /* ori */
+                *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+            }
+        }
+    }
+
+private:
+    static int linkWithOffset(MIPSWord* insn, void* to)
+    {
+        ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
+            || (*insn & 0xfc000000) == 0x14000000 // bne
+            || (*insn & 0xffff0000) == 0x45010000 // bc1t
+            || (*insn & 0xffff0000) == 0x45000000); // bc1f
+        intptr_t diff = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
+
+        if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
+            /*
+                Convert the sequence:
+                  beq $2, $3, target
+                  nop
+                  b 1f
+                  nop
+                  nop
+                  nop
+                1:
+
+                to the new sequence if possible:
+                  bne $2, $3, 1f
+                  nop
+                  j    target
+                  nop
+                  nop
+                  nop
+                1:
+
+                OR to the new sequence:
+                  bne $2, $3, 1f
+                  nop
+                  lui $25, target >> 16
+                  ori $25, $25, target & 0xffff
+                  jr $25
+                  nop
+                1:
+
+                Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t.
+            */
+
+            if (*(insn + 2) == 0x10000003) {
+                if ((*insn & 0xfc000000) == 0x10000000) // beq
+                    *insn = (*insn & 0x03ff0000) | 0x14000005; // bne
+                else if ((*insn & 0xfc000000) == 0x14000000) // bne
+                    *insn = (*insn & 0x03ff0000) | 0x10000005; // beq
+                else if ((*insn & 0xffff0000) == 0x45010000) // bc1t
+                    *insn = 0x45000005; // bc1f
+                else if ((*insn & 0xffff0000) == 0x45000000) // bc1f
+                    *insn = 0x45010005; // bc1t
+                else
+                    ASSERT(0);
+            }
+
+            insn = insn + 2;
+            if ((reinterpret_cast<intptr_t>(insn) + 4) >> 28
+                == reinterpret_cast<intptr_t>(to) >> 28) {
+                *insn = 0x08000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+                *(insn + 1) = 0;
+                return 4 * sizeof(MIPSWord);
+            }
+
+            intptr_t newTargetAddress = reinterpret_cast<intptr_t>(to);
+            /* lui */
+            *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+            /* ori */
+            *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+            /* jr */
+            *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+            return 5 * sizeof(MIPSWord);
+        }
+
+        *insn = (*insn & 0xffff0000) | (diff & 0xffff);
+        return sizeof(MIPSWord);
+    }
+
+    static int linkCallInternal(void* from, void* to)
+    {
+        MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+        insn = insn - 4;
+
+        if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal
+            if ((reinterpret_cast<intptr_t>(from) - 4) >> 28
+                == reinterpret_cast<intptr_t>(to) >> 28) {
+                *(insn + 2) = 0x0c000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+                return sizeof(MIPSWord);
+            }
+
+            /* lui $25, (to >> 16) & 0xffff */
+            *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+            /* ori $25, $25, to & 0xffff */
+            *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+            /* jalr $25 */
+            *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS);
+            return 3 * sizeof(MIPSWord);
+        }
+
+        ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+        ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori
+
+        /* lui */
+        *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+        /* ori */
+        *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+        return 2 * sizeof(MIPSWord);
+    }
+
+    AssemblerBuffer m_buffer;
+    Jumps m_jumps;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
diff --git a/assembler/MacroAssembler.cpp b/assembler/MacroAssembler.cpp
new file mode 100644
index 0000000..0cd5bcf
--- /dev/null
+++ b/assembler/MacroAssembler.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "MacroAssembler.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+const double MacroAssembler::twoToThe32 = (double)0x100000000ull;
+
+#if ENABLE(MASM_PROBE)
+static void stdFunctionCallback(MacroAssembler::ProbeContext* context)
+{
+    auto func = static_cast<std::function<void(MacroAssembler::ProbeContext*)>*>(context->arg1);
+    (*func)(context);
+}
+    
+void MacroAssembler::probe(std::function<void(MacroAssembler::ProbeContext*)> func)
+{
+    probe(stdFunctionCallback, new std::function<void(MacroAssembler::ProbeContext*)>(func), 0);
+}
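+// A sketch of the intended use, assuming ENABLE(MASM_PROBE) and a MacroAssembler
+// instance named jit (the name is illustrative only):
+//
+//     jit.probe([] (MacroAssembler::ProbeContext* context) {
+//         // inspect the register state captured in *context
+//     });
+//
+// Note that the std::function allocated above is never freed here; the generated
+// code may run long after this call returns.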
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, MacroAssembler::RelationalCondition cond)
+{
+    switch (cond) {
+    case MacroAssembler::Equal:
+        out.print("Equal");
+        return;
+    case MacroAssembler::NotEqual:
+        out.print("NotEqual");
+        return;
+    case MacroAssembler::Above:
+        out.print("Above");
+        return;
+    case MacroAssembler::AboveOrEqual:
+        out.print("AboveOrEqual");
+        return;
+    case MacroAssembler::Below:
+        out.print("Below");
+        return;
+    case MacroAssembler::BelowOrEqual:
+        out.print("BelowOrEqual");
+        return;
+    case MacroAssembler::GreaterThan:
+        out.print("GreaterThan");
+        return;
+    case MacroAssembler::GreaterThanOrEqual:
+        out.print("GreaterThanOrEqual");
+        return;
+    case MacroAssembler::LessThan:
+        out.print("LessThan");
+        return;
+    case MacroAssembler::LessThanOrEqual:
+        out.print("LessThanOrEqual");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, MacroAssembler::ResultCondition cond)
+{
+    switch (cond) {
+    case MacroAssembler::Overflow:
+        out.print("Overflow");
+        return;
+    case MacroAssembler::Signed:
+        out.print("Signed");
+        return;
+    case MacroAssembler::PositiveOrZero:
+        out.print("PositiveOrZero");
+        return;
+    case MacroAssembler::Zero:
+        out.print("Zero");
+        return;
+    case MacroAssembler::NonZero:
+        out.print("NonZero");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, MacroAssembler::DoubleCondition cond)
+{
+    switch (cond) {
+    case MacroAssembler::DoubleEqual:
+        out.print("DoubleEqual");
+        return;
+    case MacroAssembler::DoubleNotEqual:
+        out.print("DoubleNotEqual");
+        return;
+    case MacroAssembler::DoubleGreaterThan:
+        out.print("DoubleGreaterThan");
+        return;
+    case MacroAssembler::DoubleGreaterThanOrEqual:
+        out.print("DoubleGreaterThanOrEqual");
+        return;
+    case MacroAssembler::DoubleLessThan:
+        out.print("DoubleLessThan");
+        return;
+    case MacroAssembler::DoubleLessThanOrEqual:
+        out.print("DoubleLessThanOrEqual");
+        return;
+    case MacroAssembler::DoubleEqualOrUnordered:
+        out.print("DoubleEqualOrUnordered");
+        return;
+    case MacroAssembler::DoubleNotEqualOrUnordered:
+        out.print("DoubleNotEqualOrUnordered");
+        return;
+    case MacroAssembler::DoubleGreaterThanOrUnordered:
+        out.print("DoubleGreaterThanOrUnordered");
+        return;
+    case MacroAssembler::DoubleGreaterThanOrEqualOrUnordered:
+        out.print("DoubleGreaterThanOrEqualOrUnordered");
+        return;
+    case MacroAssembler::DoubleLessThanOrUnordered:
+        out.print("DoubleLessThanOrUnordered");
+        return;
+    case MacroAssembler::DoubleLessThanOrEqualOrUnordered:
+        out.print("DoubleLessThanOrEqualOrUnordered");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(ASSEMBLER)
+
diff --git a/assembler/MacroAssembler.h b/assembler/MacroAssembler.h
new file mode 100644
index 0000000..526cc82
--- /dev/null
+++ b/assembler/MacroAssembler.h
@@ -0,0 +1,1813 @@
+/*
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "JSCJSValue.h"
+
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif CPU(ARM64)
+#include "MacroAssemblerARM64.h"
+namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };
+
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC {
+typedef MacroAssemblerMIPS MacroAssemblerBase;
+};
+
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC {
+typedef MacroAssemblerSH4 MacroAssemblerBase;
+};
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+#include "MacroAssemblerHelpers.h"
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+    static constexpr RegisterID nextRegister(RegisterID reg)
+    {
+        return static_cast<RegisterID>(reg + 1);
+    }
+    
+    static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
+    {
+        return static_cast<FPRegisterID>(reg + 1);
+    }
+    
+    static constexpr unsigned numberOfRegisters()
+    {
+        return lastRegister() - firstRegister() + 1;
+    }
+    
+    static constexpr unsigned registerIndex(RegisterID reg)
+    {
+        return reg - firstRegister();
+    }
+    
+    static constexpr unsigned numberOfFPRegisters()
+    {
+        return lastFPRegister() - firstFPRegister() + 1;
+    }
+    
+    static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
+    {
+        return reg - firstFPRegister();
+    }
+    
+    static constexpr unsigned registerIndex(FPRegisterID reg)
+    {
+        return fpRegisterIndex(reg) + numberOfRegisters();
+    }
+    
+    static constexpr unsigned totalNumberOfRegisters()
+    {
+        return numberOfRegisters() + numberOfFPRegisters();
+    }
+
+    using MacroAssemblerBase::pop;
+    using MacroAssemblerBase::jump;
+    using MacroAssemblerBase::branch32;
+    using MacroAssemblerBase::compare32;
+    using MacroAssemblerBase::move;
+    using MacroAssemblerBase::add32;
+    using MacroAssemblerBase::mul32;
+    using MacroAssemblerBase::and32;
+    using MacroAssemblerBase::branchAdd32;
+    using MacroAssemblerBase::branchMul32;
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(X86_64)
+    using MacroAssemblerBase::branchPtr;
+#endif
+    using MacroAssemblerBase::branchSub32;
+    using MacroAssemblerBase::lshift32;
+    using MacroAssemblerBase::or32;
+    using MacroAssemblerBase::rshift32;
+    using MacroAssemblerBase::store32;
+    using MacroAssemblerBase::sub32;
+    using MacroAssemblerBase::urshift32;
+    using MacroAssemblerBase::xor32;
+
+    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value == static_cast<int32_t>(value);
+    }
+
+    static const double twoToThe32; // This is super useful for some double code.
+
+    // Utilities used by the DFG JIT.
+    using MacroAssemblerBase::invert;
+    
+    static DoubleCondition invert(DoubleCondition cond)
+    {
+        switch (cond) {
+        case DoubleEqual:
+            return DoubleNotEqualOrUnordered;
+        case DoubleNotEqual:
+            return DoubleEqualOrUnordered;
+        case DoubleGreaterThan:
+            return DoubleLessThanOrEqualOrUnordered;
+        case DoubleGreaterThanOrEqual:
+            return DoubleLessThanOrUnordered;
+        case DoubleLessThan:
+            return DoubleGreaterThanOrEqualOrUnordered;
+        case DoubleLessThanOrEqual:
+            return DoubleGreaterThanOrUnordered;
+        case DoubleEqualOrUnordered:
+            return DoubleNotEqual;
+        case DoubleNotEqualOrUnordered:
+            return DoubleEqual;
+        case DoubleGreaterThanOrUnordered:
+            return DoubleLessThanOrEqual;
+        case DoubleGreaterThanOrEqualOrUnordered:
+            return DoubleLessThan;
+        case DoubleLessThanOrUnordered:
+            return DoubleGreaterThanOrEqual;
+        case DoubleLessThanOrEqualOrUnordered:
+            return DoubleGreaterThan;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return DoubleEqual; // make compiler happy
+    }
+    
+    static bool isInvertible(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+        case NonZero:
+        case Signed:
+        case PositiveOrZero:
+            return true;
+        default:
+            return false;
+        }
+    }
+    
+    static ResultCondition invert(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+            return NonZero;
+        case NonZero:
+            return Zero;
+        case Signed:
+            return PositiveOrZero;
+        case PositiveOrZero:
+            return Signed;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return Zero; // Make compiler happy for release builds.
+        }
+    }
+
+    static RelationalCondition flip(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+        case NotEqual:
+            return cond;
+        case Above:
+            return Below;
+        case AboveOrEqual:
+            return BelowOrEqual;
+        case Below:
+            return Above;
+        case BelowOrEqual:
+            return AboveOrEqual;
+        case GreaterThan:
+            return LessThan;
+        case GreaterThanOrEqual:
+            return LessThanOrEqual;
+        case LessThan:
+            return GreaterThan;
+        case LessThanOrEqual:
+            return GreaterThanOrEqual;
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+        return Equal;
+    }
+
+    static bool isSigned(RelationalCondition cond)
+    {
+        return MacroAssemblerHelpers::isSigned(cond);
+    }
+
+    static bool isUnsigned(RelationalCondition cond)
+    {
+        return MacroAssemblerHelpers::isUnsigned(cond);
+    }
+
+    static bool isSigned(ResultCondition cond)
+    {
+        return MacroAssemblerHelpers::isSigned(cond);
+    }
+
+    static bool isUnsigned(ResultCondition cond)
+    {
+        return MacroAssemblerHelpers::isUnsigned(cond);
+    }
+
+    // Platform agnostic convenience functions,
+    // described in terms of other macro assembly methods.
+    void pop()
+    {
+        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+    }
+    
+    void peek(RegisterID dest, int index = 0)
+    {
+        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+    }
+
+    Address addressForPoke(int index)
+    {
+        return Address(stackPointerRegister, (index * sizeof(void*)));
+    }
+    
+    void poke(RegisterID src, int index = 0)
+    {
+        storePtr(src, addressForPoke(index));
+    }
+
+    void poke(TrustedImm32 value, int index = 0)
+    {
+        store32(value, addressForPoke(index));
+    }
+
+    void poke(TrustedImmPtr imm, int index = 0)
+    {
+        storePtr(imm, addressForPoke(index));
+    }
+
+#if !CPU(ARM64)
+    void pushToSave(RegisterID src)
+    {
+        push(src);
+    }
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+    {
+        push(imm);
+    }
+    void popToRestore(RegisterID dest)
+    {
+        pop(dest);
+    }
+    void pushToSave(FPRegisterID src)
+    {
+        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+        storeDouble(src, stackPointerRegister);
+    }
+    void popToRestore(FPRegisterID dest)
+    {
+        loadDouble(stackPointerRegister, dest);
+        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+    }
+    
+    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
+#endif // !CPU(ARM64)
+
+#if CPU(X86_64) || CPU(ARM64)
+    void peek64(RegisterID dest, int index = 0)
+    {
+        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+    }
+
+    void poke(TrustedImm64 value, int index = 0)
+    {
+        store64(value, addressForPoke(index));
+    }
+
+    void poke64(RegisterID src, int index = 0)
+    {
+        store64(src, addressForPoke(index));
+    }
+#endif
+    
+#if CPU(MIPS)
+    void poke(FPRegisterID src, int index = 0)
+    {
+        ASSERT(!(index & 1));
+        storeDouble(src, addressForPoke(index));
+    }
+#endif
+
+    // Immediate shifts only have 5 controllable bits
+    // so we'll consider them safe for now.
+    TrustedImm32 trustedImm32ForShift(Imm32 imm)
+    {
+        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+    }
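+    // For example, a requested shift of 35 is masked to 3, matching the 5-bit
+    // shift-amount field of the underlying shift instructions.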
+
+    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
+    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
+    {
+        branchPtr(cond, op1, imm).linkTo(target, this);
+    }
+    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
+    {
+        branchPtr(cond, op1, imm).linkTo(target, this);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right)
+    {
+        return branch32(flip(cond), right, left);
+    }
+
+    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
+    {
+        branch32(cond, op1, op2).linkTo(target, this);
+    }
+
+    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
+    {
+        branch32(cond, op1, imm).linkTo(target, this);
+    }
+    
+    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+    {
+        branch32(cond, op1, imm).linkTo(target, this);
+    }
+
+    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
+    {
+        branch32(cond, left, right).linkTo(target, this);
+    }
+
+    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
+    {
+        return branch32(commute(cond), right, left);
+    }
+
+    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
+    {
+        return branch32(commute(cond), right, left);
+    }
+
+    void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
+    {
+        compare32(commute(cond), right, left, dest);
+    }
+
+    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
+    {
+        branchTestPtr(cond, reg).linkTo(target, this);
+    }
+
+#if !CPU(ARM_THUMB2) && !CPU(ARM64)
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+    {
+        return PatchableJump(branchPtr(cond, left, right));
+    }
+    
+    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
+    }
+
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+    }
+
+#if !CPU(ARM_TRADITIONAL)
+    PatchableJump patchableJump()
+    {
+        return PatchableJump(jump());
+    }
+
+    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return PatchableJump(branchTest32(cond, reg, mask));
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+    {
+        return PatchableJump(branch32(cond, reg, imm));
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
+        return PatchableJump(branch32(cond, address, imm));
+    }
+#endif
+#endif
+
+    void jump(Label target)
+    {
+        jump().linkTo(target, this);
+    }
+
+    // Commute a relational condition, returns a new condition that will produce
+    // the same results given the same inputs but with their positions exchanged.
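+    // For example, "a < b" holds exactly when "b > a" holds, so LessThan commutes
+    // to GreaterThan; Equal and NotEqual are symmetric and map to themselves.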
+    static RelationalCondition commute(RelationalCondition condition)
+    {
+        switch (condition) {
+        case Above:
+            return Below;
+        case AboveOrEqual:
+            return BelowOrEqual;
+        case Below:
+            return Above;
+        case BelowOrEqual:
+            return AboveOrEqual;
+        case GreaterThan:
+            return LessThan;
+        case GreaterThanOrEqual:
+            return LessThanOrEqual;
+        case LessThan:
+            return GreaterThan;
+        case LessThanOrEqual:
+            return GreaterThanOrEqual;
+        default:
+            break;
+        }
+
+        ASSERT(condition == Equal || condition == NotEqual);
+        return condition;
+    }
+
+    void oops()
+    {
+        abortWithReason(B3Oops);
+    }
+
+    // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return
+    // consumes some register in some way.
+    void retVoid() { ret(); }
+    void ret32(RegisterID) { ret(); }
+    void ret64(RegisterID) { ret(); }
+    void retFloat(FPRegisterID) { ret(); }
+    void retDouble(FPRegisterID) { ret(); }
+
+    static const unsigned BlindingModulus = 64;
+    bool shouldConsiderBlinding()
+    {
+        return !(random() & (BlindingModulus - 1));
+    }
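+    // With BlindingModulus == 64 this is true roughly once every 64 calls, so even
+    // constants that look attacker-controllable are only blinded about 1 time in
+    // 64, which keeps the cost of blinding low at the price of probabilistic coverage.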
+
+    // Ptr methods
+    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
+    // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64) && !CPU(ARM64)
+    void addPtr(Address src, RegisterID dest)
+    {
+        add32(src, dest);
+    }
+
+    void addPtr(AbsoluteAddress src, RegisterID dest)
+    {
+        add32(src, dest);
+    }
+
+    void addPtr(RegisterID src, RegisterID dest)
+    {
+        add32(src, dest);
+    }
+
+    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        add32(left, right, dest);
+    }
+
+    void addPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add32(imm, srcDest);
+    }
+
+    void addPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        add32(TrustedImm32(imm), dest);
+    }
+
+    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        add32(imm, src, dest);
+    }
+
+    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        add32(imm, address);
+    }
+    
+    void andPtr(RegisterID src, RegisterID dest)
+    {
+        and32(src, dest);
+    }
+
+    void andPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        and32(imm, srcDest);
+    }
+
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        and32(TrustedImm32(imm), srcDest);
+    }
+
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift32(trustedImm32ForShift(imm), srcDest);
+    }
+    
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        rshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void negPtr(RegisterID dest)
+    {
+        neg32(dest);
+    }
+
+    void orPtr(RegisterID src, RegisterID dest)
+    {
+        or32(src, dest);
+    }
+
+    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        or32(op1, op2, dest);
+    }
+
+    void orPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        or32(TrustedImm32(imm), dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        or32(imm, dest);
+    }
+
+    void subPtr(RegisterID src, RegisterID dest)
+    {
+        sub32(src, dest);
+    }
+    
+    void subPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        sub32(imm, dest);
+    }
+    
+    void subPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        sub32(TrustedImm32(imm), dest);
+    }
+
+    void xorPtr(RegisterID src, RegisterID dest)
+    {
+        xor32(src, dest);
+    }
+
+    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        xor32(imm, srcDest);
+    }
+
+
+    void loadPtr(ImplicitAddress address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void loadPtr(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void loadPtr(const void* address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load32WithAddressOffsetPatch(address, dest);
+    }
+    
+    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load32WithCompactAddressOffsetPatch(address, dest);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        move(Imm32(imm.asTrustedImmPtr()), dest);
+    }
+
+    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        compare32(cond, left, right, dest);
+    }
+
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        compare32(cond, left, right, dest);
+    }
+    
+    void storePtr(RegisterID src, ImplicitAddress address)
+    {
+        store32(src, address);
+    }
+
+    void storePtr(RegisterID src, BaseIndex address)
+    {
+        store32(src, address);
+    }
+
+    void storePtr(RegisterID src, void* address)
+    {
+        store32(src, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+    {
+        store32(TrustedImm32(imm), address);
+    }
+    
+    void storePtr(ImmPtr imm, Address address)
+    {
+        store32(Imm32(imm.asTrustedImmPtr()), address);
+    }
+
+    void storePtr(TrustedImmPtr imm, void* address)
+    {
+        store32(TrustedImm32(imm), address);
+    }
+
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store32(imm, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
+    {
+        store32(TrustedImm32(imm), address);
+    }
+
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        return store32WithAddressOffsetPatch(src, address);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+    {
+        return branch32(cond, left, TrustedImm32(right));
+    }
+    
+    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+    {
+        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+    {
+        return branch32(cond, left, TrustedImm32(right));
+    }
+    
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
+    {
+        return branch32(cond, left, TrustedImm32(right));
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub32(cond, src, dest);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        return branchTest32(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest32(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest32(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest32(cond, address, mask);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd32(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub32(cond, imm, dest);
+    }
+    using MacroAssemblerBase::branchTest8;
+    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+    }
+
+#else // !CPU(X86_64) && !CPU(ARM64)
+
+    void addPtr(RegisterID src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        add64(left, right, dest);
+    }
+    
+    void addPtr(Address src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add64(imm, srcDest);
+    }
+
+    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        add64(imm, src, dest);
+    }
+
+    void addPtr(TrustedImm32 imm, Address address)
+    {
+        add64(imm, address);
+    }
+
+    void addPtr(AbsoluteAddress src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        add64(TrustedImm64(imm), dest);
+    }
+
+    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        add64(imm, address);
+    }
+
+    void andPtr(RegisterID src, RegisterID dest)
+    {
+        and64(src, dest);
+    }
+
+    void andPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        and64(imm, srcDest);
+    }
+    
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        and64(imm, srcDest);
+    }
+    
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        rshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void negPtr(RegisterID dest)
+    {
+        neg64(dest);
+    }
+
+    void orPtr(RegisterID src, RegisterID dest)
+    {
+        or64(src, dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        or64(imm, dest);
+    }
+
+    void orPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        or64(TrustedImm64(imm), dest);
+    }
+
+    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        or64(op1, op2, dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        or64(imm, src, dest);
+    }
+    
+    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+    {
+        rotateRight64(imm, srcDst);
+    }
+
+    void subPtr(RegisterID src, RegisterID dest)
+    {
+        sub64(src, dest);
+    }
+    
+    void subPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        sub64(imm, dest);
+    }
+    
+    void subPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        sub64(TrustedImm64(imm), dest);
+    }
+
+    void xorPtr(RegisterID src, RegisterID dest)
+    {
+        xor64(src, dest);
+    }
+    
+    void xorPtr(RegisterID src, Address dest)
+    {
+        xor64(src, dest);
+    }
+
+    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        xor64(imm, srcDest);
+    }
+
+    void loadPtr(ImplicitAddress address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    void loadPtr(BaseIndex address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    void loadPtr(const void* address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithAddressOffsetPatch(address, dest);
+    }
+    
+    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithCompactAddressOffsetPatch(address, dest);
+    }
+
+    void storePtr(RegisterID src, ImplicitAddress address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(RegisterID src, BaseIndex address)
+    {
+        store64(src, address);
+    }
+    
+    void storePtr(RegisterID src, void* address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+    {
+        store64(TrustedImm64(imm), address);
+    }
+
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store64(imm, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
+    {
+        store64(TrustedImm64(imm), address);
+    }
+
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        return store64WithAddressOffsetPatch(src, address);
+    }
+
+    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        compare64(cond, left, right, dest);
+    }
+    
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        compare64(cond, left, right, dest);
+    }
+    
+    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        test64(cond, reg, mask, dest);
+    }
+
+    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        test64(cond, reg, mask, dest);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+    {
+        return branch64(cond, left, TrustedImm64(right));
+    }
+    
+    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+    {
+        return branch64(cond, left, TrustedImm64(right));
+    }
+
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        return branchTest64(cond, reg, mask);
+    }
+    
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+    {
+        return branchTest64(cond, address, reg);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd64(cond, imm, dest);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd64(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub64(cond, imm, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub64(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+    {
+        return branchSub64(cond, src1, src2, dest);
+    }
+
+    using MacroAssemblerBase::and64;
+    using MacroAssemblerBase::convertInt32ToDouble;
+    using MacroAssemblerBase::store64;
+    bool shouldBlindDouble(double value)
+    {
+        // Don't trust NaN or +/-Infinity
+        if (!std::isfinite(value))
+            return shouldConsiderBlinding();
+
+        // Try to force normalisation, and check that there's no change
+        // in the bit pattern
+        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
+            return shouldConsiderBlinding();
+
+        value = fabs(value);
+        // Only allow a limited set of fractional components
+        double scaledValue = value * 8;
+        if (scaledValue / 8 != value)
+            return shouldConsiderBlinding();
+        double frac = scaledValue - floor(scaledValue);
+        if (frac != 0.0)
+            return shouldConsiderBlinding();
+
+        return value > 0xff;
+    }
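+    // In effect a double escapes blinding only when it is finite, bit-stable under
+    // normalization, a multiple of 1/8, and no larger than 0xff in magnitude;
+    // larger multiples of 1/8 are always blinded, and everything else falls back
+    // to the probabilistic shouldConsiderBlinding() check.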
+    
+    bool shouldBlindPointerForSpecificArch(uintptr_t value)
+    {
+        if (sizeof(void*) == 4)
+            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
+        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
+    }
+    
+    bool shouldBlind(ImmPtr imm)
+    {
+        if (!canBlind())
+            return false;
+        
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#endif
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+        }
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindPointerForSpecificArch(value);
+    }
+    
+    struct RotatedImmPtr {
+        RotatedImmPtr(uintptr_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImmPtr value;
+        TrustedImm32 rotation;
+    };
+    
+    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
+    {
+        uint8_t rotation = random() % (sizeof(void*) * 8);
+        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+        return RotatedImmPtr(value, rotation);
+    }
+    
+    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+    {
+        move(constant.value, dest);
+        rotateRightPtr(constant.rotation, dest);
+    }
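+
+    // Editor's sketch (not part of the original source): rotation blinding materialises the
+    // pointer pre-rotated left by a random amount, then emits a rotate-right to undo it, so the
+    // raw pointer bits never appear verbatim as an instruction immediate. The round trip relies
+    // on rotr(rotl(v, r), r) == v. For a 64-bit pointer, roughly:
+    //
+    //     uint64_t v = 0x00007f1234abcd00;                              // the real pointer bits
+    //     unsigned r = 13;                                              // random rotation, 1..63 here
+    //     uint64_t blinded   = (v << r) | (v >> (64 - r));              // what move() materialises
+    //     uint64_t recovered = (blinded >> r) | (blinded << (64 - r));  // what rotateRightPtr() restores
+    //     // recovered == v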
+
+    bool shouldBlind(Imm64 imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;        
+#endif
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uint64_t value = imm.asTrustedImm64().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+
+            JSValue jsValue = JSValue::decode(value);
+            if (jsValue.isInt32())
+                return shouldBlind(Imm32(jsValue.asInt32()));
+            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+                return false;
+
+            if (!shouldBlindDouble(bitwise_cast<double>(value)))
+                return false;
+        }
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
+    }
+    
+    struct RotatedImm64 {
+        RotatedImm64(uint64_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImm64 value;
+        TrustedImm32 rotation;
+    };
+    
+    RotatedImm64 rotationBlindConstant(Imm64 imm)
+    {
+        uint8_t rotation = random() % (sizeof(int64_t) * 8);
+        uint64_t value = imm.asTrustedImm64().m_value;
+        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+        return RotatedImm64(value, rotation);
+    }
+    
+    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+    {
+        move(constant.value, dest);
+        rotateRight64(constant.rotation, dest);
+    }
+
+    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
+    {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+            convertInt32ToDouble(scratchRegister, dest);
+        } else
+            convertInt32ToDouble(imm.asTrustedImm32(), dest);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImmPtr(), dest);
+    }
+
+    void move(Imm64 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm64(), dest);
+    }
+
+    void and64(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            and64(key.value1, dest);
+            and64(key.value2, dest);
+        } else
+            and64(imm.asTrustedImm32(), dest);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+    {
+        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+            return branchPtr(cond, left, scratchRegister);
+        }
+        return branchPtr(cond, left, right.asTrustedImmPtr());
+    }
+    
+    void storePtr(ImmPtr imm, Address dest)
+    {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            storePtr(scratchRegister, dest);
+        } else
+            storePtr(imm.asTrustedImmPtr(), dest);
+    }
+
+    void store64(Imm64 imm, Address dest)
+    {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            store64(scratchRegister, dest);
+        } else
+            store64(imm.asTrustedImm64(), dest);
+    }
+
+#endif // !CPU(X86_64)
+
+#if ENABLE(B3_JIT)
+    // We should implement this the right way eventually, but for now, it's fine because it arises so
+    // infrequently.
+    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+    {
+        move(TrustedImm32(0), dest);
+        Jump falseCase = branchDouble(invert(cond), left, right);
+        move(TrustedImm32(1), dest);
+        falseCase.link(this);
+    }
+    void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+    {
+        move(TrustedImm32(0), dest);
+        Jump falseCase = branchFloat(invert(cond), left, right);
+        move(TrustedImm32(1), dest);
+        falseCase.link(this);
+    }
+#endif
+
+    void lea32(Address address, RegisterID dest)
+    {
+        add32(TrustedImm32(address.offset), address.base, dest);
+    }
+
+#if CPU(X86_64) || CPU(ARM64)
+    void lea64(Address address, RegisterID dest)
+    {
+        add64(TrustedImm32(address.offset), address.base, dest);
+    }
+#endif // CPU(X86_64) || CPU(ARM64)
+
+    bool shouldBlind(Imm32 imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#else // ENABLE(FORCED_JIT_BLINDING)
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uint32_t value = imm.asTrustedImm32().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffff:
+            return false;
+        default:
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
+#endif // ENABLE(FORCED_JIT_BLINDING)
+    }
+
+    struct BlindedImm32 {
+        BlindedImm32(int32_t v1, int32_t v2)
+            : value1(v1)
+            , value2(v2)
+        {
+        }
+        TrustedImm32 value1;
+        TrustedImm32 value2;
+    };
+
+    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
+    {
+        uint32_t key = random();
+        if (value <= 0xff)
+            mask = 0xff;
+        else if (value <= 0xffff)
+            mask = 0xffff;
+        else if (value <= 0xffffff)
+            mask = 0xffffff;
+        else
+            mask = 0xffffffff;
+        return key & mask;
+    }
+
+    uint32_t keyForConstant(uint32_t value)
+    {
+        uint32_t mask = 0;
+        return keyForConstant(value, mask);
+    }
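+
+    // Editor's note (illustrative, not part of the original source): the mask keeps the random
+    // key no wider than the constant being blinded, so both halves of a blinded pair stay within
+    // the same immediate width. For example, keyForConstant(0x1234, mask) sets mask to 0xffff and
+    // returns a key in the range [0, 0xffff].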
+
+    BlindedImm32 xorBlindConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t key = keyForConstant(baseValue);
+        return BlindedImm32(baseValue ^ key, key);
+    }
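+
+    // Editor's sketch (not part of the original source): XOR blinding splits a constant x into
+    // the pair (x ^ k, k); neither emitted immediate equals x, yet XOR-ing them back together in
+    // loadXorBlindedConstant() restores it, because (x ^ k) ^ k == x. For example:
+    //
+    //     uint32_t x  = 0x00012345;
+    //     uint32_t k  = 0x0003a7c1;   // random key, masked to the width of x by keyForConstant()
+    //     uint32_t v1 = x ^ k;        // value1: loaded into the destination register first
+    //     // v1 ^ k == x              // value2 (the key) is XORed in afterwards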
+
+    BlindedImm32 additionBlindedConstant(Imm32 imm)
+    {
+        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
+        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
+
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
+        if (key > baseValue)
+            key = key - baseValue;
+        return BlindedImm32(baseValue - key, key);
+    }
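+
+    // Editor's note (illustrative, not part of the original source): additionBlindedConstant()
+    // splits imm into two summands whose sum is imm; because the immediate may be a pointer
+    // offset, the key is masked so the low alignment bits of imm stay in a single summand:
+    //
+    //     imm & 3 == 0 (e.g. 0x1234) -> mask 0xfffffffc: both summands remain 4-byte aligned
+    //     imm & 3 == 2 (e.g. 0x1236) -> mask 0xfffffffe: both summands remain 2-byte aligned
+    //     imm odd      (e.g. 0x1235) -> mask 0xffffffff: no alignment to preserve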
+    
+    BlindedImm32 andBlindedConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t mask = 0;
+        uint32_t key = keyForConstant(baseValue, mask);
+        ASSERT((baseValue & mask) == baseValue);
+        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
+    }
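+
+    // Editor's note (illustrative, not part of the original source): andBlindedConstant() relies
+    // on the bitwise identity ((x & k) | ~k) & ((x & ~k) | k) == x. Each factor exposes only the
+    // bits of x selected by k (respectively ~k) and forces every other bit to 1, so AND-ing the
+    // two factors reproduces x. The trailing "& mask" keeps both factors within the constant's
+    // original width, which is why the ASSERT above requires (baseValue & mask) == baseValue.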
+    
+    BlindedImm32 orBlindedConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t mask = 0;
+        uint32_t key = keyForConstant(baseValue, mask);
+        ASSERT((baseValue & mask) == baseValue);
+        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
+    }
+    
+    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
+    {
+        move(constant.value1, dest);
+        xor32(constant.value2, dest);
+    }
+    
+    void add32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            add32(key.value1, dest);
+            add32(key.value2, dest);
+        } else
+            add32(imm.asTrustedImm32(), dest);
+    }
+
+    void add32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            add32(key.value1, src, dest);
+            add32(key.value2, dest);
+        } else
+            add32(imm.asTrustedImm32(), src, dest);
+    }
+    
+    void addPtr(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            addPtr(key.value1, dest);
+            addPtr(key.value2, dest);
+        } else
+            addPtr(imm.asTrustedImm32(), dest);
+    }
+
+    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            if (src != dest || haveScratchRegisterForBlinding()) {
+                if (src == dest) {
+                    move(src, scratchRegisterForBlinding());
+                    src = scratchRegisterForBlinding();
+                }
+                loadXorBlindedConstant(xorBlindConstant(imm), dest);
+                mul32(src, dest);
+                return;
+            }
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+        }
+        mul32(imm.asTrustedImm32(), src, dest);
+    }
+
+    void and32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            and32(key.value1, dest);
+            and32(key.value2, dest);
+        } else
+            and32(imm.asTrustedImm32(), dest);
+    }
+
+    void andPtr(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            andPtr(key.value1, dest);
+            andPtr(key.value2, dest);
+        } else
+            andPtr(imm.asTrustedImm32(), dest);
+    }
+    
+    void and32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            if (src == dest)
+                return and32(imm.asTrustedImm32(), dest);
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            and32(src, dest);
+        } else
+            and32(imm.asTrustedImm32(), src, dest);
+    }
+
+    void move(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm32(), dest);
+    }
+    
+    void or32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            if (src == dest)
+                return or32(imm, dest);
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            or32(src, dest);
+        } else
+            or32(imm.asTrustedImm32(), src, dest);
+    }
+    
+    void or32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = orBlindedConstant(imm);
+            or32(key.value1, dest);
+            or32(key.value2, dest);
+        } else
+            or32(imm.asTrustedImm32(), dest);
+    }
+    
+    void poke(Imm32 value, int index = 0)
+    {
+        store32(value, addressForPoke(index));
+    }
+    
+    void poke(ImmPtr value, int index = 0)
+    {
+        storePtr(value, addressForPoke(index));
+    }
+    
+#if CPU(X86_64) || CPU(ARM64)
+    void poke(Imm64 value, int index = 0)
+    {
+        store64(value, addressForPoke(index));
+    }
+#endif // CPU(X86_64) || CPU(ARM64)
+    
+    void store32(Imm32 imm, Address dest)
+    {
+        if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
+            BlindedImm32 blind = xorBlindConstant(imm);
+            store32(blind.value1, dest);
+            xor32(blind.value2, dest);
+#else // CPU(X86) || CPU(X86_64)
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
+                store32(scratchRegisterForBlinding(), dest);
+            } else {
+                // If we don't have a scratch register available for use, we'll just 
+                // place a random number of nops.
+                uint32_t nopCount = random() & 3;
+                while (nopCount--)
+                    nop();
+                store32(imm.asTrustedImm32(), dest);
+            }
+#endif // CPU(X86) || CPU(X86_64)
+        } else
+            store32(imm.asTrustedImm32(), dest);
+    }
+    
+    void sub32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            sub32(key.value1, dest);
+            sub32(key.value2, dest);
+        } else
+            sub32(imm.asTrustedImm32(), dest);
+    }
+    
+    void subPtr(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            subPtr(key.value1, dest);
+            subPtr(key.value2, dest);
+        } else
+            subPtr(imm.asTrustedImm32(), dest);
+    }
+    
+    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 blind = xorBlindConstant(imm);
+            xor32(blind.value1, src, dest);
+            xor32(blind.value2, dest);
+        } else
+            xor32(imm.asTrustedImm32(), src, dest);
+    }
+    
+    void xor32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 blind = xorBlindConstant(imm);
+            xor32(blind.value1, dest);
+            xor32(blind.value2, dest);
+        } else
+            xor32(imm.asTrustedImm32(), dest);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+    {
+        if (shouldBlind(right)) {
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
+                return branch32(cond, left, scratchRegisterForBlinding());
+            }
+            // If we don't have a scratch register available for use, we'll just 
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+            return branch32(cond, left, right.asTrustedImm32());
+        }
+        
+        return branch32(cond, left, right.asTrustedImm32());
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        if (shouldBlind(right)) {
+            if (left != dest || haveScratchRegisterForBlinding()) {
+                RegisterID blindedConstantReg = dest;
+                if (left == dest)
+                    blindedConstantReg = scratchRegisterForBlinding();
+                loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
+                compare32(cond, left, blindedConstantReg, dest);
+                return;
+            }
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+            compare32(cond, left, right.asTrustedImm32(), dest);
+            return;
+        }
+
+        compare32(cond, left, right.asTrustedImm32(), dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            if (src != dest || haveScratchRegisterForBlinding()) {
+                if (src == dest) {
+                    move(src, scratchRegisterForBlinding());
+                    src = scratchRegisterForBlinding();
+                }
+                loadXorBlindedConstant(xorBlindConstant(imm), dest);
+                return branchAdd32(cond, src, dest);
+            }
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+        }
+        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);            
+    }
+    
+    Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+    {
+        if (src == dest)
+            ASSERT(haveScratchRegisterForBlinding());
+
+        if (shouldBlind(imm)) {
+            if (src == dest) {
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
+            }
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            return branchMul32(cond, src, dest);  
+        }
+        return branchMul32(cond, src, imm.asTrustedImm32(), dest);
+    }
+
+    // branchSub32 takes a scratch register because 32-bit platforms make use of this
+    // with src == dest, and on x86-32 we don't have a platform scratch register.
+    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
+    {
+        if (shouldBlind(imm)) {
+            ASSERT(scratch != dest);
+            ASSERT(scratch != src);
+            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+            return branchSub32(cond, src, scratch, dest);
+        }
+        return branchSub32(cond, src, imm.asTrustedImm32(), dest);            
+    }
+    
+    void lshift32(Imm32 imm, RegisterID dest)
+    {
+        lshift32(trustedImm32ForShift(imm), dest);
+    }
+    
+    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
+    {
+        lshift32(src, trustedImm32ForShift(amount), dest);
+    }
+    
+    void rshift32(Imm32 imm, RegisterID dest)
+    {
+        rshift32(trustedImm32ForShift(imm), dest);
+    }
+    
+    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
+    {
+        rshift32(src, trustedImm32ForShift(amount), dest);
+    }
+    
+    void urshift32(Imm32 imm, RegisterID dest)
+    {
+        urshift32(trustedImm32ForShift(imm), dest);
+    }
+    
+    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
+    {
+        urshift32(src, trustedImm32ForShift(amount), dest);
+    }
+
+#if ENABLE(MASM_PROBE)
+    using MacroAssemblerBase::probe;
+
+    // Lets you print from your JIT generated code.
+    // See comments in MacroAssemblerPrinter.h for examples of how to use this.
+    template<typename... Arguments>
+    void print(Arguments... args);
+
+    void probe(std::function<void(ProbeContext*)>);
+#endif
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
+void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
+void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);
+
+} // namespace WTF
+
+#else // ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+class MacroAssembler {
+private:
+    MacroAssembler() { }
+    
+public:
+    
+    enum RegisterID { NoRegister };
+    enum FPRegisterID { NoFPRegister };
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MacroAssemblerARM.cpp b/assembler/MacroAssemblerARM.cpp
new file mode 100644
index 0000000..9b1440f
--- /dev/null
+++ b/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "MacroAssemblerARM.h"
+
+#include <wtf/InlineASM.h>
+
+#if OS(LINUX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+static bool isVFPPresent()
+{
+#if OS(LINUX)
+    int fd = open("/proc/self/auxv", O_RDONLY);
+    if (fd != -1) {
+        Elf32_auxv_t aux;
+        while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+            if (aux.a_type == AT_HWCAP) {
+                close(fd);
+                return aux.a_un.a_val & HWCAP_VFP;
+            }
+        }
+        close(fd);
+    }
+#endif // OS(LINUX)
+
+#if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__))
+    return true;
+#else
+    return false;
+#endif
+}
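+
+// Editor's note (illustrative alternative, not part of the original source): on glibc 2.16 or
+// newer the same HWCAP query can be made without parsing /proc/self/auxv by hand:
+//
+//     #include <sys/auxv.h>
+//     static bool isVFPPresentViaGetauxval()
+//     {
+//         return getauxval(AT_HWCAP) & HWCAP_VFP;
+//     }
+//
+// The hand-rolled read loop above keeps the detection working on older C libraries.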
+
+const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
+
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
+void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+{
+    ARMWord op2;
+
+    ASSERT(address.scale >= 0 && address.scale <= 3);
+    op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
+
+    if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
+        m_assembler.add(ARMRegisters::S0, address.base, op2);
+        m_assembler.halfDtrUp(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset));
+        m_assembler.halfDtrUp(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset + 0x2));
+    } else if (address.offset < 0 && address.offset >= -0xff) {
+        m_assembler.add(ARMRegisters::S0, address.base, op2);
+        m_assembler.halfDtrDown(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset));
+        m_assembler.halfDtrDown(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset - 0x2));
+    } else {
+        m_assembler.moveImm(address.offset, ARMRegisters::S0);
+        m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, op2);
+        m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, dest, address.base, ARMRegisters::S0);
+        m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::Op2Immediate | 0x2);
+        m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, ARMRegisters::S0, address.base, ARMRegisters::S0);
+    }
+    m_assembler.orr(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
+}
+#endif // CPU(ARMV5_OR_LOWER)
+
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+#if COMPILER(GCC_OR_CLANG)
+    
+// The following are offsets for MacroAssemblerARM::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#define PTR_SIZE 4
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE)
+
+#define GPREG_SIZE 4
+#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+
+#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(sizeof(MacroAssemblerARM::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+#undef PROBE_OFFSETOF
+
+asm (
+    ".text" "\n"
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    // MacroAssemblerARM::probe() has already generated code to store some values.
+    // The top of stack now looks like this:
+    //     esp[0 * ptrSize]: probeFunction
+    //     esp[1 * ptrSize]: arg1
+    //     esp[2 * ptrSize]: arg2
+    //     esp[3 * ptrSize]: saved r3 / S0
+    //     esp[4 * ptrSize]: saved ip
+    //     esp[5 * ptrSize]: saved lr
+    //     esp[6 * ptrSize]: saved sp
+
+    "mov       ip, sp" "\n"
+    "mov       r3, sp" "\n"
+    "sub       r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
+
+    // The ARM EABI specifies that the stack needs to be 16 byte aligned.
+    "bic       r3, r3, #0xf" "\n"
+    "mov       sp, r3" "\n"
+
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "add       lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n"
+    "stmia     lr, { r0-r11 }" "\n"
+    "mrs       lr, APSR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "vmrs      lr, FPSCR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+
+    "ldr       lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
+    "vstmia.64 ip, { d0-d15 }" "\n"
+
+    "mov       fp, sp" "\n" // Save the ProbeContext*.
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "mov       r0, sp" "\n" // the ProbeContext* arg.
+    "blx       ip" "\n"
+
+    "mov       sp, fp" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n"
+    "vldmdb.64 ip!, { d0-d15 }" "\n"
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
+    "ldmdb     ip, { r0-r11 }" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+    "vmsr      FPSCR, ip" "\n"
+
+    // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
+    // There are 2 issues that complicate the restoration of these last few
+    // registers:
+    //
+    // 1. Normal ARM calling convention relies on moving lr to pc to return to
+    //    the caller. In our case, the address to return to is specified by
+    //    ProbeContext.cpu.pc. And at that moment, we won't have any available
+    //    scratch registers to hold the return address (lr needs to hold
+    //    ProbeContext.cpu.lr, not the return address).
+    //
+    //    The solution is to store the return address on the stack and load the
+    //     pc from there.
+    //
+    // 2. Issue 1 means we will need to write to the stack location at
+    //    ProbeContext.cpu.sp - 4. But if the user probe function had modified
+    //    the value of ProbeContext.cpu.sp to point in the range between
+    //    &ProbeContext.cpu.ip and &ProbeContext.cpu.apsr, then the action for
+    //    Issue 1 may trash the values to be restored before we can restore
+    //    them.
+    //
+    //    The solution is to check if ProbeContext.cpu.sp contains a value in
+    //    the undesirable range. If so, we copy the remaining ProbeContext
+    //    register data to a safe range (at memory lower than where
+    //    ProbeContext.cpu.sp points) first, and restore the remaining registers
+    //    from this new range.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "cmp       lr, ip" "\n"
+    "bgt     " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // We get here because the new expected stack pointer location is lower
+    // than where it's supposed to be. This means the safe range of stack
+    // memory where we'll be copying the remaining register restore values to
+    // might be in a region of memory below the sp i.e. unallocated stack
+    // memory. This in turn makes it vulnerable to interrupts potentially
+    // trashing the copied values. To prevent that, we must first allocate the
+    // needed stack memory by adjusting the sp before the copying.
+
+    "sub       lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
+    " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
+
+    "mov       ip, sp" "\n"
+    "mov       sp, lr" "\n"
+    "mov       lr, ip" "\n"
+
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "sub       lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
+    "str       ip, [lr]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "msr       APSR, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "mov       lr, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "pop       { pc }" "\n"
+);
+#endif // COMPILER(GCC_OR_CLANG)
+
+void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2)
+{
+    push(RegisterID::sp);
+    push(RegisterID::lr);
+    push(RegisterID::ip);
+    push(RegisterID::S0);
+    // The following pushes use RegisterID::S0, so they must come after we push S0 above.
+    push(trustedImm32FromPtr(arg2));
+    push(trustedImm32FromPtr(arg1));
+    push(trustedImm32FromPtr(function));
+
+    move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::S0);
+    m_assembler.blx(RegisterID::S0);
+
+}
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/assembler/MacroAssemblerARM.h b/assembler/MacroAssemblerARM.h
new file mode 100644
index 0000000..7d36034
--- /dev/null
+++ b/assembler/MacroAssemblerARM.h
@@ -0,0 +1,1627 @@
+/*
+ * Copyright (C) 2008, 2013-2016 Apple Inc.
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> {
+    static const int DoubleConditionMask = 0x0f;
+    static const int DoubleConditionBitSpecial = 0x10;
+    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
+public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+    
+    typedef ARMRegisters::FPRegisterID FPRegisterID;
+
+    enum RelationalCondition {
+        Equal = ARMAssembler::EQ,
+        NotEqual = ARMAssembler::NE,
+        Above = ARMAssembler::HI,
+        AboveOrEqual = ARMAssembler::CS,
+        Below = ARMAssembler::CC,
+        BelowOrEqual = ARMAssembler::LS,
+        GreaterThan = ARMAssembler::GT,
+        GreaterThanOrEqual = ARMAssembler::GE,
+        LessThan = ARMAssembler::LT,
+        LessThanOrEqual = ARMAssembler::LE
+    };
+
+    enum ResultCondition {
+        Overflow = ARMAssembler::VS,
+        Signed = ARMAssembler::MI,
+        PositiveOrZero = ARMAssembler::PL,
+        Zero = ARMAssembler::EQ,
+        NonZero = ARMAssembler::NE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARMAssembler::EQ,
+        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
+        DoubleGreaterThan = ARMAssembler::GT,
+        DoubleGreaterThanOrEqual = ARMAssembler::GE,
+        DoubleLessThan = ARMAssembler::CC,
+        DoubleLessThanOrEqual = ARMAssembler::LS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+        DoubleNotEqualOrUnordered = ARMAssembler::NE,
+        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+        DoubleLessThanOrUnordered = ARMAssembler::LT,
+        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
+    };
+
+    static const RegisterID stackPointerRegister = ARMRegisters::sp;
+    static const RegisterID framePointerRegister = ARMRegisters::fp;
+    static const RegisterID linkRegister = ARMRegisters::lr;
+
+    static const Scale ScalePtr = TimesFour;
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.adds(dest, dest, src);
+    }
+
+    void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.adds(dest, op1, op2);
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S1);
+        add32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.adds(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void add32(AbsoluteAddress src, RegisterID dest)
+    {
+        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
+        add32(ARMRegisters::S1, dest);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        add32(ARMRegisters::S1, dest);
+    }
+
+    void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.adds(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.bitAnds(dest, dest, src);
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.bitAnds(dest, op1, op2);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::Op2InvertedImmediate)
+            m_assembler.bics(dest, dest, w & ~ARMAssembler::Op2InvertedImmediate);
+        else
+            m_assembler.bitAnds(dest, dest, w);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::Op2InvertedImmediate)
+            m_assembler.bics(dest, src, w & ~ARMAssembler::Op2InvertedImmediate);
+        else
+            m_assembler.bitAnds(dest, src, w);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        and32(ARMRegisters::S1, dest);
+    }
+
+    void lshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        lshift32(dest, shiftAmount, dest);
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+        m_assembler.movs(dest, m_assembler.lslRegister(src, ARMRegisters::S0));
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
+    }
+
+    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest) {
+            if (op1 == dest) {
+                move(op2, ARMRegisters::S0);
+                op2 = ARMRegisters::S0;
+            } else {
+                // Swap the operands.
+                RegisterID tmp = op1;
+                op1 = op2;
+                op2 = tmp;
+            }
+        }
+        m_assembler.muls(dest, op1, op2);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        mul32(src, dest, dest);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, ARMRegisters::S0);
+        m_assembler.muls(dest, src, ARMRegisters::S0);
+    }
+
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.rsbs(srcDest, srcDest, ARMAssembler::getOp2Byte(0));
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orrs(dest, dest, src);
+    }
+
+    void or32(RegisterID src, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+        or32(src, ARMRegisters::S1);
+        store32(ARMRegisters::S1, ARMRegisters::S0);
+    }
+
+    void or32(TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+        or32(imm, ARMRegisters::S1); // It uses S0 as temporary register, we need to reload the address.
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        store32(ARMRegisters::S1, ARMRegisters::S0);
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S0);
+        or32(imm, ARMRegisters::S0, ARMRegisters::S0);
+        store32(ARMRegisters::S0, address);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT(dest != ARMRegisters::S0);
+        m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        ASSERT(src != ARMRegisters::S0);
+        m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orrs(dest, op1, op2);
+    }
+
+    void rshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        rshift32(dest, shiftAmount, dest);
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+        m_assembler.movs(dest, m_assembler.asrRegister(src, ARMRegisters::S0));
+    }
+
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        rshift32(dest, imm, dest);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+    }
+
+    void urshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift32(dest, shiftAmount, dest);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+        m_assembler.movs(dest, m_assembler.lsrRegister(src, ARMRegisters::S0));
+    }
+
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
+    }
+    
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.subs(dest, dest, src);
+    }
+
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.subs(dest, left, right);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S1);
+        sub32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        sub32(ARMRegisters::S1, dest);
+    }
+
+    void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.subs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.eors(dest, dest, src);
+    }
+
+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.eors(dest, op1, op2);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvns(dest, dest);
+        else
+            m_assembler.eors(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvns(dest, src);
+        else    
+            m_assembler.eors(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void countLeadingZeros32(RegisterID src, RegisterID dest)
+    {
+#if WTF_ARM_ARCH_AT_LEAST(5)
+        m_assembler.clz(dest, src);
+#else
+        UNUSED_PARAM(src);
+        UNUSED_PARAM(dest);
+        RELEASE_ASSERT_NOT_REACHED();
+#endif
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset);
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void load8(const void* address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address), ARMRegisters::S0);
+        m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
+    }
+
+    void load8SignedExtendTo32(Address address, RegisterID dest)
+    {
+        m_assembler.dataTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.offset);
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void load16(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset);
+    }
+
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+#if CPU(ARMV5_OR_LOWER)
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
+#else
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+#endif
+
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        load16(address, dest);
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), ARMRegisters::S0);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), ARMRegisters::S1);
+        abortWithReason(reason);
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result(this);
+        ASSERT(address.offset >= 0 && address.offset <= 255);
+        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+        return result;
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
+        m_assembler.dtrUpRegister(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0);
+        return dataLabel;
+    }
+
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value >= -4095 && value <= 4095;
+    }
+
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabelCompact dataLabel(this);
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+        if (address.offset >= 0)
+            m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+        else
+            m_assembler.dtrDown(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+        return dataLabel;
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
+        m_assembler.dtrUpRegister(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0);
+        return dataLabel;
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void store8(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
+    }
+
+    void store8(RegisterID src, const void* address)
+    {
+        move(TrustedImmPtr(address), ARMRegisters::S0);
+        m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
+    }
+
+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(imm8, ARMRegisters::S1);
+        store8(ARMRegisters::S1, address);
+    }
+
+    void store8(TrustedImm32 imm, const void* address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+        move(imm8, ARMRegisters::S1);
+        m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+        m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        move(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        move(imm, ARMRegisters::S1);
+        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void store32(RegisterID src, const void* address)
+    {
+        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+        m_assembler.dtrUp(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0);
+    }
+
+    void store32(TrustedImm32 imm, const void* address)
+    {
+        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+        m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
+        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0);
+    }
+
+    void pop(RegisterID dest)
+    {
+        m_assembler.pop(dest);
+    }
+
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.pop(dest1);
+        m_assembler.pop(dest2);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.push(src);
+    }
+
+    void push(Address address)
+    {
+        load32(address, ARMRegisters::S1);
+        push(ARMRegisters::S1);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        move(imm, ARMRegisters::S0);
+        push(ARMRegisters::S0);
+    }
+
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.push(src2);
+        m_assembler.push(src1);
+    }
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.moveImm(imm.m_value, dest);
+    }
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.mov(dest, src);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        move(TrustedImm32(imm), dest);
+    }
+
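+    // Swap two registers in place with the classic three-XOR trick, avoiding a scratch register.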
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        xor32(reg1, reg2);
+        xor32(reg2, reg1);
+        xor32(reg1, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+    }
+
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
+    }
+
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
+    {
+        m_assembler.cmp(left, right);
+        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
+    {
+        internalCompare32(left, right);
+        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load32(right, ARMRegisters::S1);
+        return branch32(cond, left, ARMRegisters::S1);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
+        m_assembler.tst(reg, mask);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
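+    // getImm() may only be able to encode the bitwise inverse of the mask as an operand2
+    // immediate; in that case BICS computes reg & mask (reg AND NOT ~mask) and sets the
+    // same N/Z flags that TST would.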
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
+        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::Op2InvertedImmediate)
+            m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
+        else
+            m_assembler.tst(reg, w);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask);
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask);
+    }
+
+    Jump jump()
+    {
+        return Jump(m_assembler.jmp());
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.bx(target);
+    }
+
+    void jump(Address address)
+    {
+        load32(address, ARMRegisters::pc);
+    }
+
+    void jump(AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
+        load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
+    }
+
+    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.vmov(dest1, dest2, src);
+    }
+
+    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
+    {
+        m_assembler.vmov(dest, src1, src2);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+            || (cond == NonZero) || (cond == PositiveOrZero));
+        add32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+            || (cond == NonZero) || (cond == PositiveOrZero));
+        add32(op1, op2, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+            || (cond == NonZero) || (cond == PositiveOrZero));
+        add32(imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+            || (cond == NonZero) || (cond == PositiveOrZero));
+        add32(src, imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
+            || (cond == NonZero) || (cond == PositiveOrZero));
+        add32(imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S0);
+        return branchAdd32(cond, dest, ARMRegisters::S0, dest);
+    }
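+
+    // Signed multiply with overflow detection: MULL leaves the full 64-bit product
+    // (high word in S1, low word in dest). The product fits in 32 bits only when the
+    // high word equals the sign extension of the low word, which the CMP against
+    // 'dest ASR #31' checks.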
+    void mull32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest) {
+            if (op1 == dest) {
+                move(op2, ARMRegisters::S0);
+                op2 = ARMRegisters::S0;
+            } else {
+                // Swap the operands.
+                RegisterID tmp = op1;
+                op1 = op2;
+                op2 = tmp;
+            }
+        }
+        m_assembler.mull(ARMRegisters::S1, dest, op1, op2);
+        m_assembler.cmp(ARMRegisters::S1, m_assembler.asr(dest, 31));
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            mull32(src1, src2, dest);
+            cond = NonZero;
+        }
+        else
+            mul32(src1, src2, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchMul32(cond, src, dest, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            move(imm, ARMRegisters::S0);
+            mull32(ARMRegisters::S0, src, dest);
+            cond = NonZero;
+        }
+        else
+            mul32(imm, src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(src, imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        m_assembler.subs(dest, op1, op2);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        neg32(srcDest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+        or32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    PatchableJump patchableJump()
+    {
+        return PatchableJump(m_assembler.jmp(ARMAssembler::AL, 1));
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+    {
+        internalCompare32(reg, imm);
+        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), true));
+        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
+        return PatchableJump(jump);
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
+        internalCompare32(address, imm);
+        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), false));
+        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
+        return PatchableJump(jump);
+    }
+
+    void breakpoint()
+    {
+        m_assembler.bkpt(0);
+    }
+
+    Call nearCall()
+    {
+        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
+    }
+
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
+    Call call(RegisterID target)
+    {
+        return Call(m_assembler.blx(target), Call::None);
+    }
+
+    void call(Address address)
+    {
+        call32(address.base, address.offset);
+    }
+
+    void ret()
+    {
+        m_assembler.bx(linkRegister);
+    }
+
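+    // Materialize a boolean result: compare, unconditionally set dest to 0, then
+    // conditionally overwrite it with 1 when the condition holds.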
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp(left, right);
+        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        compare32(cond, ARMRegisters::S1, right8, dest);
+    }
+
+    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.tst(reg, reg);
+        else
+            m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
+        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+    }
+
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load32(address, ARMRegisters::S1);
+        test32(cond, ARMRegisters::S1, mask, dest);
+    }
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        test32(cond, ARMRegisters::S1, mask8, dest);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.add(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, ARMRegisters::S1);
+        add32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address.m_ptr);
+    }
+
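+    // 64-bit add into memory on a 32-bit target: ADDS/SUBS the low word so the carry
+    // (or borrow) is recorded in the flags, then propagate it into the high word with
+    // ADC (or SBC).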
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        ARMWord tmp;
+
+        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, 0);
+
+        if ((tmp = ARMAssembler::getOp2(imm.m_value)) != ARMAssembler::InvalidImmediate)
+            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, tmp);
+        else if ((tmp = ARMAssembler::getOp2(-imm.m_value)) != ARMAssembler::InvalidImmediate)
+            m_assembler.subs(ARMRegisters::S0, ARMRegisters::S0, tmp);
+        else {
+            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, m_assembler.getImm(imm.m_value, ARMRegisters::S1));
+            move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+        }
+        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, 0);
+
+        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
+        if (imm.m_value >= 0)
+            m_assembler.adc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+        else
+            m_assembler.sbc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, ARMRegisters::S1);
+        sub32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address.m_ptr);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        load32(left.m_ptr, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
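+    // Jump into a table of code stubs laid out immediately after this sequence. Reading
+    // pc in ARM mode yields the address of the current instruction plus 8, so
+    // 'add pc, pc, index << scale' lands two instructions past the add, i.e. just after
+    // the NOP below, offset by the scaled index.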
+    void relativeTableJump(RegisterID index, int scale)
+    {
+        ASSERT(scale >= 0 && scale <= 31);
+        m_assembler.add(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));
+
+        // NOP the default prefetching
+        m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
+    }
+
+    Call call()
+    {
+        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
+        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
+    }
+
+    Call tailRecursiveCall()
+    {
+        return Call::fromTailJump(jump());
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        return Call::fromTailJump(oldJump);
+    }
+
+    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+    {
+        DataLabelPtr dataLabel(this);
+        m_assembler.ldrUniqueImmediate(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+        return dataLabel;
+    }
+
+    DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
+        return dataLabel;
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
+        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
+        return jump;
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        load32(left, ARMRegisters::S1);
+        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+        return jump;
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        load32(left, ARMRegisters::S1);
+        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+        return jump;
+    }
+
+    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+        return dataLabel;
+    }
+
+    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+    {
+        return storePtrWithPatch(TrustedImmPtr(0), address);
+    }
+
+    // Floating point operators
+    static bool supportsFloatingPoint()
+    {
+        return s_isVFPPresent;
+    }
+
+    static bool supportsFloatingPointTruncate()
+    {
+        return false;
+    }
+
+    static bool supportsFloatingPointSqrt()
+    {
+        return s_isVFPPresent;
+    }
+    static bool supportsFloatingPointAbs() { return false; }
+    static bool supportsFloatingPointRounding() { return false; }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset);
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
+        m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
+    }
+
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset);
+    }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
+    {
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
+        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
+    }
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.vmov_f64(dest, src);
+    }
+
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        static double zeroConstant = 0.;
+        loadDouble(TrustedImmPtr(&zeroConstant), reg);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vadd_f64(dest, dest, src);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vadd_f64(dest, op1, op2);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, ARMRegisters::SD0);
+        addDouble(ARMRegisters::SD0, dest);
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
+        addDouble(ARMRegisters::SD0, dest);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vdiv_f64(dest, dest, src);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vdiv_f64(dest, op1, op2);
+    }
+
+    void divDouble(Address src, FPRegisterID dest)
+    {
+        RELEASE_ASSERT_NOT_REACHED(); // Untested
+        loadDouble(src, ARMRegisters::SD0);
+        divDouble(ARMRegisters::SD0, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vsub_f64(dest, dest, src);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vsub_f64(dest, op1, op2);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, ARMRegisters::SD0);
+        subDouble(ARMRegisters::SD0, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vmul_f64(dest, dest, src);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, ARMRegisters::SD0);
+        mulDouble(ARMRegisters::SD0, dest);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vmul_f64(dest, op1, op2);
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vsqrt_f64(dest, src);
+    }
+    
+    void absDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vabs_f64(dest, src);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vneg_f64(dest, src);
+    }
+
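+    // VFP single-precision registers alias the doubles (s[2n] overlays the low half of
+    // d[n]), hence 'dest << 1' to address the single-precision view of the destination
+    // when moving the integer in before converting it.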
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vmov_vfp32(dest << 1, src);
+        m_assembler.vcvt_f64_s32(dest, dest << 1);
+    }
+
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        convertInt32ToDouble(ARMRegisters::S1, dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+        load32(Address(ARMRegisters::S1), ARMRegisters::S1);
+        convertInt32ToDouble(ARMRegisters::S1, dest);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.vcvt_f64_f32(dst, src);
+    }
+
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.vcvt_f32_f64(dst, src);
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.vcmp_f64(left, right);
+        m_assembler.vmrs_apsr();
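+        // Conditions carrying DoubleConditionBitSpecial need the unordered (NaN) case
+        // folded in explicitly: if the VFP compare was unordered (V set), comparing S0
+        // with itself forces the Z flag, so EQ-style conditions take the branch and
+        // NE-style conditions do not.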
+        if (cond & DoubleConditionBitSpecial)
+            m_assembler.cmp(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
+        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
+    }
+
+    // Truncates 'src' to an integer and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, INT_MIN).
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        truncateDoubleToInt32(src, dest);
+
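+        // vcvt saturates out-of-range doubles to INT_MIN or INT_MAX. Adding 1 and then
+        // clearing bit 0 maps both saturation values (0x7fffffff and 0x80000000) to
+        // 0x80000000, so the comparison below detects a truncation that may have failed.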
+        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
+        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));
+
+        ARMWord w = ARMAssembler::getOp2(0x80000000);
+        ASSERT(w != ARMAssembler::InvalidImmediate);
+        m_assembler.cmp(ARMRegisters::S0, w);
+        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
+    }
+
+    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        truncateDoubleToUint32(src, dest);
+
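+        // The unsigned conversion saturates to 0 or 0xffffffff; the same add/bic trick
+        // maps both saturation values to 0, which the comparison against 0 detects.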
+        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
+        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));
+
+        m_assembler.cmp(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
+    }
+
+    // Result is undefined if the value is outside of the integer range.
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
+        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.vcvt_u32_f64(ARMRegisters::SD0 << 1, src);
+        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+    }
+
+    // Converts 'src' to an integer and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+    {
+        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
+        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+
+        // Convert the integer result back to a double and compare it to the original value; if they are not equal, or the compare is unordered (NaN), jump to the failure cases.
+        m_assembler.vcvt_f64_s32(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
+
+        // If the result is zero, it might have been -0.0; since 0.0 compares equal to -0.0, the comparison above cannot tell them apart.
+        if (negZeroCheck)
+            failureCases.append(branchTest32(Zero, dest));
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+        convertInt32ToDouble(ARMRegisters::S0, scratch);
+        return branchDouble(DoubleNotEqual, reg, scratch);
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+        convertInt32ToDouble(ARMRegisters::S0, scratch);
+        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+    }
+
+    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
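+    // ARM condition codes come in complementary pairs that differ only in bit 28 of the
+    // encoding, so the inverse is obtained by XORing with 0x10000000.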
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL));
+        return static_cast<RelationalCondition>(cond ^ 0x10000000);
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+
+    void memoryFence()
+    {
+        m_assembler.dmbSY();
+    }
+
+    void storeFence()
+    {
+        m_assembler.dmbISHST();
+    }
+
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
+    }
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return ARMAssembler::maxJumpReplacementSize();
+    }
+
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARMAssembler::patchableJumpSize();
+    }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        return label.labelAtOffset(0);
+    }
+
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+    {
+        ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
+    {
+        return static_cast<ARMAssembler::Condition>(cond);
+    }
+
+    ARMAssembler::Condition ARMCondition(ResultCondition cond)
+    {
+        return static_cast<ARMAssembler::Condition>(cond);
+    }
+
+    void ensureSpace(int insnSpace, int constSpace)
+    {
+        m_assembler.ensureSpace(insnSpace, constSpace);
+    }
+
+    int sizeOfConstantPool()
+    {
+        return m_assembler.sizeOfConstantPool();
+    }
+
+    void call32(RegisterID base, int32_t offset)
+    {
+        load32(Address(base, offset), ARMRegisters::S1);
+        m_assembler.blx(ARMRegisters::S1);
+    }
+
+private:
+    friend class LinkBuffer;
+
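+    // Prefer CMN with the negated immediate when it fits in an operand2 encoding
+    // (0x80000000 cannot be negated, so it is excluded up front); otherwise materialize
+    // the immediate in S0 and use CMP.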
+    void internalCompare32(RegisterID left, TrustedImm32 right)
+    {
+        ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
+        if (tmp != ARMAssembler::InvalidImmediate)
+            m_assembler.cmn(left, tmp);
+        else
+            m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+    }
+
+    void internalCompare32(Address left, TrustedImm32 right)
+    {
+        ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
+        load32(left, ARMRegisters::S1);
+        if (tmp != ARMAssembler::InvalidImmediate)
+            m_assembler.cmn(ARMRegisters::S1, tmp);
+        else
+            m_assembler.cmp(ARMRegisters::S1, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+    }
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Tail))
+            ARMAssembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMAssembler::linkCall(code, call.m_label, function.value());
+    }
+
+
+#if ENABLE(MASM_PROBE)
+    inline TrustedImm32 trustedImm32FromPtr(void* ptr)
+    {
+        return TrustedImm32(TrustedImmPtr(ptr));
+    }
+
+    inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
+    {
+        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+    }
+
+    inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+    {
+        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+    }
+#endif
+
+    static const bool s_isVFPPresent;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/assembler/MacroAssemblerARM64.cpp b/assembler/MacroAssemblerARM64.cpp
new file mode 100644
index 0000000..8e7b51b
--- /dev/null
+++ b/assembler/MacroAssemblerARM64.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM64)
+#include "MacroAssemblerARM64.h"
+
+#include <wtf/InlineASM.h>
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+using namespace ARM64Registers;
+
+#if COMPILER(GCC_OR_CLANG)
+
+// The following are offsets for MacroAssemblerARM64::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+#define PTR_SIZE 8
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE)
+
+#define GPREG_SIZE 8
+#define PROBE_CPU_X0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_X1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_X2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_X3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_X4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_X5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_X6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_X7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_X8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_X9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_X10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_X11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_X12_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_X13_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_X14_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_X15_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+#define PROBE_CPU_X16_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_X17_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+#define PROBE_CPU_X18_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+#define PROBE_CPU_X19_OFFSET (PROBE_FIRST_GPREG_OFFSET + (19 * GPREG_SIZE))
+#define PROBE_CPU_X20_OFFSET (PROBE_FIRST_GPREG_OFFSET + (20 * GPREG_SIZE))
+#define PROBE_CPU_X21_OFFSET (PROBE_FIRST_GPREG_OFFSET + (21 * GPREG_SIZE))
+#define PROBE_CPU_X22_OFFSET (PROBE_FIRST_GPREG_OFFSET + (22 * GPREG_SIZE))
+#define PROBE_CPU_X23_OFFSET (PROBE_FIRST_GPREG_OFFSET + (23 * GPREG_SIZE))
+#define PROBE_CPU_X24_OFFSET (PROBE_FIRST_GPREG_OFFSET + (24 * GPREG_SIZE))
+#define PROBE_CPU_X25_OFFSET (PROBE_FIRST_GPREG_OFFSET + (25 * GPREG_SIZE))
+#define PROBE_CPU_X26_OFFSET (PROBE_FIRST_GPREG_OFFSET + (26 * GPREG_SIZE))
+#define PROBE_CPU_X27_OFFSET (PROBE_FIRST_GPREG_OFFSET + (27 * GPREG_SIZE))
+#define PROBE_CPU_X28_OFFSET (PROBE_FIRST_GPREG_OFFSET + (28 * GPREG_SIZE))
+#define PROBE_CPU_FP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (29 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (30 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (31 * GPREG_SIZE))
+
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (32 * GPREG_SIZE))
+#define PROBE_CPU_NZCV_OFFSET (PROBE_FIRST_GPREG_OFFSET + (33 * GPREG_SIZE))
+#define PROBE_CPU_FPSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (34 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (35 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_Q0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_Q1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_Q2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_Q3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_Q4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_Q5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_Q6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_Q7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_Q8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_Q9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_Q10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_Q11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_Q12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_Q13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_Q14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_Q15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+#define PROBE_CPU_Q16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+#define PROBE_CPU_Q17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE))
+#define PROBE_CPU_Q18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE))
+#define PROBE_CPU_Q19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE))
+#define PROBE_CPU_Q20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE))
+#define PROBE_CPU_Q21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE))
+#define PROBE_CPU_Q22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE))
+#define PROBE_CPU_Q23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE))
+#define PROBE_CPU_Q24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE))
+#define PROBE_CPU_Q25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE))
+#define PROBE_CPU_Q26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE))
+#define PROBE_CPU_Q27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE))
+#define PROBE_CPU_Q28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE))
+#define PROBE_CPU_Q29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE))
+#define PROBE_CPU_Q30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE))
+#define PROBE_CPU_Q31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE))
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE))
+#define SAVED_CALLER_SP PROBE_SIZE
+#define PROBE_SIZE_PLUS_SAVED_CALLER_SP (SAVED_CALLER_SP + PTR_SIZE)
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM64::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x0) == PROBE_CPU_X0_OFFSET, ProbeContext_cpu_x0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x1) == PROBE_CPU_X1_OFFSET, ProbeContext_cpu_x1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x2) == PROBE_CPU_X2_OFFSET, ProbeContext_cpu_x2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x3) == PROBE_CPU_X3_OFFSET, ProbeContext_cpu_x3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x4) == PROBE_CPU_X4_OFFSET, ProbeContext_cpu_x4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x5) == PROBE_CPU_X5_OFFSET, ProbeContext_cpu_x5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x6) == PROBE_CPU_X6_OFFSET, ProbeContext_cpu_x6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x7) == PROBE_CPU_X7_OFFSET, ProbeContext_cpu_x7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x8) == PROBE_CPU_X8_OFFSET, ProbeContext_cpu_x8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x9) == PROBE_CPU_X9_OFFSET, ProbeContext_cpu_x9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x10) == PROBE_CPU_X10_OFFSET, ProbeContext_cpu_x10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x11) == PROBE_CPU_X11_OFFSET, ProbeContext_cpu_x11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x12) == PROBE_CPU_X12_OFFSET, ProbeContext_cpu_x12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x13) == PROBE_CPU_X13_OFFSET, ProbeContext_cpu_x13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x14) == PROBE_CPU_X14_OFFSET, ProbeContext_cpu_x14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x15) == PROBE_CPU_X15_OFFSET, ProbeContext_cpu_x15_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x16) == PROBE_CPU_X16_OFFSET, ProbeContext_cpu_x16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x17) == PROBE_CPU_X17_OFFSET, ProbeContext_cpu_x17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x18) == PROBE_CPU_X18_OFFSET, ProbeContext_cpu_x18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x19) == PROBE_CPU_X19_OFFSET, ProbeContext_cpu_x19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x20) == PROBE_CPU_X20_OFFSET, ProbeContext_cpu_x20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x21) == PROBE_CPU_X21_OFFSET, ProbeContext_cpu_x21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x22) == PROBE_CPU_X22_OFFSET, ProbeContext_cpu_x22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x23) == PROBE_CPU_X23_OFFSET, ProbeContext_cpu_x23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x24) == PROBE_CPU_X24_OFFSET, ProbeContext_cpu_x24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x25) == PROBE_CPU_X25_OFFSET, ProbeContext_cpu_x25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x26) == PROBE_CPU_X26_OFFSET, ProbeContext_cpu_x26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x27) == PROBE_CPU_X27_OFFSET, ProbeContext_cpu_x27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x28) == PROBE_CPU_X28_OFFSET, ProbeContext_cpu_x28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fp) == PROBE_CPU_FP_OFFSET, ProbeContext_cpu_fp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.nzcv) == PROBE_CPU_NZCV_OFFSET, ProbeContext_cpu_nzcv_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpsr) == PROBE_CPU_FPSR_OFFSET, ProbeContext_cpu_fpsr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q0) == PROBE_CPU_Q0_OFFSET, ProbeContext_cpu_q0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q1) == PROBE_CPU_Q1_OFFSET, ProbeContext_cpu_q1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q2) == PROBE_CPU_Q2_OFFSET, ProbeContext_cpu_q2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q3) == PROBE_CPU_Q3_OFFSET, ProbeContext_cpu_q3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q4) == PROBE_CPU_Q4_OFFSET, ProbeContext_cpu_q4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q5) == PROBE_CPU_Q5_OFFSET, ProbeContext_cpu_q5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q6) == PROBE_CPU_Q6_OFFSET, ProbeContext_cpu_q6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q7) == PROBE_CPU_Q7_OFFSET, ProbeContext_cpu_q7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q8) == PROBE_CPU_Q8_OFFSET, ProbeContext_cpu_q8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q9) == PROBE_CPU_Q9_OFFSET, ProbeContext_cpu_q9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q10) == PROBE_CPU_Q10_OFFSET, ProbeContext_cpu_q10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q11) == PROBE_CPU_Q11_OFFSET, ProbeContext_cpu_q11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q12) == PROBE_CPU_Q12_OFFSET, ProbeContext_cpu_q12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q13) == PROBE_CPU_Q13_OFFSET, ProbeContext_cpu_q13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q14) == PROBE_CPU_Q14_OFFSET, ProbeContext_cpu_q14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q15) == PROBE_CPU_Q15_OFFSET, ProbeContext_cpu_q15_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q16) == PROBE_CPU_Q16_OFFSET, ProbeContext_cpu_q16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q17) == PROBE_CPU_Q17_OFFSET, ProbeContext_cpu_q17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q18) == PROBE_CPU_Q18_OFFSET, ProbeContext_cpu_q18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q19) == PROBE_CPU_Q19_OFFSET, ProbeContext_cpu_q19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q20) == PROBE_CPU_Q20_OFFSET, ProbeContext_cpu_q20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q21) == PROBE_CPU_Q21_OFFSET, ProbeContext_cpu_q21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q22) == PROBE_CPU_Q22_OFFSET, ProbeContext_cpu_q22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q23) == PROBE_CPU_Q23_OFFSET, ProbeContext_cpu_q23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q24) == PROBE_CPU_Q24_OFFSET, ProbeContext_cpu_q24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q25) == PROBE_CPU_Q25_OFFSET, ProbeContext_cpu_q25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q26) == PROBE_CPU_Q26_OFFSET, ProbeContext_cpu_q26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q27) == PROBE_CPU_Q27_OFFSET, ProbeContext_cpu_q27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q28) == PROBE_CPU_Q28_OFFSET, ProbeContext_cpu_q28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q29) == PROBE_CPU_Q29_OFFSET, ProbeContext_cpu_q29_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q30) == PROBE_CPU_Q30_OFFSET, ProbeContext_cpu_q30_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q31) == PROBE_CPU_Q31_OFFSET, ProbeContext_cpu_q31_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(sizeof(MacroAssemblerARM64::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
+
+asm (
+    ".text" "\n"
+    ".align 2" "\n"
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    // MacroAssemblerARM64::probe() has already generated code to store some values.
+    // The top of stack (the caller save buffer) now looks like this:
+    //     sp[0 * ptrSize]: probeFunction
+    //     sp[1 * ptrSize]: arg1
+    //     sp[2 * ptrSize]: arg2
+    //     sp[3 * ptrSize]: address of arm64ProbeTrampoline()
+    //     sp[4 * ptrSize]: saved x27
+    //     sp[5 * ptrSize]: saved x28
+    //     sp[6 * ptrSize]: saved lr
+    //     sp[7 * ptrSize]: saved sp
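+    //
+    // For reference, a sketch (informal) of how MacroAssemblerARM64::probe(), defined
+    // further below, fills those eight slots before branching here:
+    //
+    //     sub64(TrustedImm32(8 * 8), sp);     // reserve the 8-slot caller save buffer
+    //     store64(x27, Address(sp, 4 * 8));   // saved x27
+    //     store64(x28, Address(sp, 5 * 8));   // saved x28
+    //     store64(lr, Address(sp, 6 * 8));    // saved lr
+    //     store64(x28, Address(sp, 7 * 8));   // original sp (sp + 8 * 8), computed via x28
+    //     // slots 0-3: probeFunction, arg1, arg2, and the arm64ProbeTrampoline address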
+
+    "mov       x27, sp" "\n"
+    "mov       x28, sp" "\n"
+
+    "sub       x28, x28, #" STRINGIZE_VALUE_OF(PROBE_SIZE_PLUS_SAVED_CALLER_SP) "\n"
+
+    // The ARM64 ABI requires that the stack be 16 byte aligned.
+    "bic       x28, x28, #0xf" "\n"
+    "mov       sp, x28" "\n"
+
+    "str       x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n"
+
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n"
+    "str       x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n"
+    "str       x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n"
+    "str       x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n"
+    "str       x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n"
+    "str       x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n"
+    "str       x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n"
+    "str       x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n"
+    "str       x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n"
+    "str       x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n"
+    "str       x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n"
+    "str       x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n"
+    "str       x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n"
+    "str       x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n"
+    "str       x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n"
+    "str       x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n"
+    "str       x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n"
+    "str       x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n"
+    "str       x18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X18_OFFSET) "]" "\n"
+    "str       x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n"
+    "str       x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n"
+    "str       x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n"
+    "str       x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n"
+    "str       x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n"
+    "str       x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n"
+    "str       x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n"
+    "str       x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n"
+
+    "ldr       x0, [x27, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n"
+
+    "str       fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n"
+
+    "ldr       x0, [x27, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #7 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    "mrs       x0, nzcv" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n"
+    "mrs       x0, fpsr" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n"
+
+    "ldr       x0, [x27, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+    "ldr       x0, [x27, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+
+    "str       d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n"
+    "str       d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n"
+    "str       d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n"
+    "str       d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n"
+    "str       d4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n"
+    "str       d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n"
+    "str       d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n"
+    "str       d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n"
+    "str       d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n"
+    "str       d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n"
+    "str       d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n"
+    "str       d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n"
+    "str       d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n"
+    "str       d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n"
+    "str       d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n"
+    "str       d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n"
+    "str       d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n"
+    "str       d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n"
+    "str       d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n"
+    "str       d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n"
+    "str       d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n"
+    "str       d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n"
+    "str       d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n"
+    "str       d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n"
+    "str       d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n"
+    "str       d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n"
+    "str       d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n"
+    "str       d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n"
+    "str       d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n"
+    "str       d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n"
+    "str       d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n"
+    "str       d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n"
+
+    "mov       x28, sp" "\n" // Save the ProbeContext*.
+
+    "mov       x0, sp" "\n" // the ProbeContext* arg.
+    "ldr       x27, [x27, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "blr       x27" "\n"
+
+    "mov       sp, x28" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning, except for x18, pc, and sp.
+
+    // x18 is "reserved for the platform. Conforming software should not make use of it."
+    // Hence, the JITs would not be using it, and the probe should also not be modifying it.
+    // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html.
+
+    // We can't modify the pc, because the only way to set its value on ARM64 is via
+    // an indirect branch or a ret, both of which need a free register to hold the
+    // target. The probe mechanism must not perturb any registers that the caller
+    // may be using, so no such free register is available.
+
+    // In order to return to the caller, we need to ret via lr. The probe mechanism will
+    // restore lr's value after returning to the caller by loading the restore value
+    // from the caller save buffer. The caller expects to access the caller save buffer via
+    // sp. Hence, we cannot allow sp to be modified by the probe.
+
+    "ldr       d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n"
+    "ldr       d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n"
+    "ldr       d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n"
+    "ldr       d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n"
+    "ldr       d4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n"
+    "ldr       d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n"
+    "ldr       d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n"
+    "ldr       d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n"
+    "ldr       d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n"
+    "ldr       d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n"
+    "ldr       d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n"
+    "ldr       d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n"
+    "ldr       d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n"
+    "ldr       d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n"
+    "ldr       d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n"
+    "ldr       d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n"
+    "ldr       d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n"
+    "ldr       d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n"
+    "ldr       d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n"
+    "ldr       d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n"
+    "ldr       d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n"
+    "ldr       d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n"
+    "ldr       d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n"
+    "ldr       d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n"
+    "ldr       d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n"
+    "ldr       d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n"
+    "ldr       d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n"
+    "ldr       d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n"
+    "ldr       d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n"
+    "ldr       d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n"
+    "ldr       d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n"
+    "ldr       d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n"
+
+    "ldr       x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n"
+    "ldr       x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n"
+    "ldr       x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n"
+    "ldr       x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n"
+    "ldr       x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n"
+    "ldr       x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n"
+    "ldr       x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n"
+    "ldr       x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n"
+    "ldr       x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n"
+    "ldr       x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n"
+    "ldr       x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n"
+    "ldr       x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n"
+    "ldr       x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n"
+    "ldr       x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n"
+    "ldr       x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n"
+    "ldr       x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n"
+    "ldr       x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n"
+    "ldr       x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n"
+    // x18 should not be modified by the probe. See comment above for details.
+    "ldr       x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n"
+    "ldr       x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n"
+    "ldr       x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n"
+    "ldr       x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n"
+    "ldr       x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n"
+    "ldr       x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n"
+    "ldr       x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n"
+    "ldr       x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n"
+
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n"
+    "msr       fpsr, x27" "\n"
+
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n"
+    "msr       nzcv, x27" "\n"
+    "ldr       fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n"
+
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n"
+    "ldr       x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n"
+
+    // There are 5 more registers left to restore: x27, x28, lr, sp, and pc.
+    // The JIT code's lr and sp will be restored by the caller.
+
+    // Restore pc by loading it into lr. The ret below will put it in the pc.
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    // We need x27 as a scratch register to help with popping the ProbeContext.
+    // Hence, before we pop the ProbeContext, we need to copy the restore value
+    // for x27 from the ProbeContext to the caller save buffer.
+    "ldr       x28, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n"
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n"
+    "str       x27, [x28, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+
+    // Since lr is also restored by the caller, we need to copy its restore
+    // value to the caller save buffer too.
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "str       x27, [x28, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+
+    // We're now done with x28, and can restore its value.
+    "ldr       x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n"
+
+    // We're now done with the ProbeContext, and can pop it to restore sp so that
+    // it points to the caller save buffer.
+    "ldr       x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n"
+    "mov       sp, x27" "\n"
+
+    // We're now done with x27, and can restore it.
+    "ldr       x27, [sp, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+
+    "ret" "\n"
+);
+#endif // COMPILER(GCC_OR_CLANG)
+
+static void arm64ProbeTrampoline(MacroAssemblerARM64::ProbeContext* context)
+{
+    void* origSP = context->cpu.sp;
+    void* origPC = context->cpu.pc;
+    
+    context->probeFunction(context);
+    
+    if (context->cpu.sp != origSP) {
+        dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the SP. The change will be ignored\n");
+        context->cpu.sp = origSP;
+    }
+    if (context->cpu.pc != origPC) {
+        dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the PC. The change will be ignored\n");
+        context->cpu.pc = origPC;
+    }
+}
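+// A minimal usage sketch (hypothetical names, for illustration only): a probe is a
+// plain function with the ProbeFunction signature; JIT code injects it via
+// MacroAssemblerARM64::probe(), and it may inspect or rewrite the saved register
+// state, except for sp and pc, which arm64ProbeTrampoline() above resets:
+//
+//     static void myProbe(MacroAssemblerARM64::ProbeContext* context)
+//     {
+//         dataLog("probe hit, fp = ", RawPointer(context->cpu.fp), "\n");
+//     }
+//     ...
+//     jit.probe(myProbe, nullptr, nullptr); // 'jit' is a MacroAssemblerARM64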
+
+void MacroAssemblerARM64::probe(MacroAssemblerARM64::ProbeFunction function, void* arg1, void* arg2)
+{
+    sub64(TrustedImm32(8 * 8), sp);
+
+    store64(x27, Address(sp, 4 * 8));
+    store64(x28, Address(sp, 5 * 8));
+    store64(lr, Address(sp, 6 * 8));
+
+    add64(TrustedImm32(8 * 8), sp, x28);
+    store64(x28, Address(sp, 7 * 8)); // Save original sp value.
+
+    move(TrustedImmPtr(reinterpret_cast<void*>(function)), x28);
+    store64(x28, Address(sp));
+    move(TrustedImmPtr(arg1), x28);
+    store64(x28, Address(sp, 1 * 8));
+    move(TrustedImmPtr(arg2), x28);
+    store64(x28, Address(sp, 2 * 8));
+    move(TrustedImmPtr(reinterpret_cast<void*>(arm64ProbeTrampoline)), x28);
+    store64(x28, Address(sp, 3 * 8));
+
+    move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), x28);
+    m_assembler.blr(x28);
+
+    // ctiMasmProbeTrampoline should have restored every register except for
+    // lr and sp.
+    load64(Address(sp, 6 * 8), lr);
+    add64(TrustedImm32(8 * 8), sp);
+}
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
+
diff --git a/assembler/MacroAssemblerARM64.h b/assembler/MacroAssemblerARM64.h
new file mode 100644
index 0000000..0ab235f
--- /dev/null
+++ b/assembler/MacroAssemblerARM64.h
@@ -0,0 +1,3982 @@
+/*
+ * Copyright (C) 2012, 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARM64Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include 
+#include 
+
+namespace JSC {
+
+class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
+public:
+    static const unsigned numGPRs = 32;
+    static const unsigned numFPRs = 32;
+    
+    static const RegisterID dataTempRegister = ARM64Registers::ip0;
+    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
+
+    RegisterID scratchRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return getCachedDataTempRegisterIDAndInvalidate();
+    }
+
+private:
+    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
+    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
+    static const intptr_t maskHalfWord0 = 0xffffl;
+    static const intptr_t maskHalfWord1 = 0xffff0000l;
+    static const intptr_t maskUpperWord = 0xffffffff00000000l;
+
+    // 4 instructions - 3 to load the function pointer, plus the blr.
+    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
+    
+public:
+    MacroAssemblerARM64()
+        : m_dataMemoryTempRegister(this, dataTempRegister)
+        , m_cachedMemoryTempRegister(this, memoryTempRegister)
+        , m_makeJumpPatchable(false)
+    {
+    }
+
+    typedef ARM64Assembler::LinkRecord LinkRecord;
+    typedef ARM64Assembler::JumpType JumpType;
+    typedef ARM64Assembler::JumpLinkType JumpLinkType;
+    typedef ARM64Assembler::Condition Condition;
+
+    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
+    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); }
+
+    static const Scale ScalePtr = TimesEight;
+
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        // This is the largest 32-bit access allowed, aligned to a 64-bit boundary.
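+        // Worked example (informal): ~0x3ff8 clears bits 3-13, so this accepts exactly
+        // the multiples of 8 in the range [0, 0x3ff8] (0x3ff8 = 16376).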
+        return !(value & ~0x3ff8);
+    }
+
+    enum RelationalCondition {
+        Equal = ARM64Assembler::ConditionEQ,
+        NotEqual = ARM64Assembler::ConditionNE,
+        Above = ARM64Assembler::ConditionHI,
+        AboveOrEqual = ARM64Assembler::ConditionHS,
+        Below = ARM64Assembler::ConditionLO,
+        BelowOrEqual = ARM64Assembler::ConditionLS,
+        GreaterThan = ARM64Assembler::ConditionGT,
+        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
+        LessThan = ARM64Assembler::ConditionLT,
+        LessThanOrEqual = ARM64Assembler::ConditionLE
+    };
+
+    enum ResultCondition {
+        Overflow = ARM64Assembler::ConditionVS,
+        Signed = ARM64Assembler::ConditionMI,
+        PositiveOrZero = ARM64Assembler::ConditionPL,
+        Zero = ARM64Assembler::ConditionEQ,
+        NonZero = ARM64Assembler::ConditionNE
+    };
+
+    enum ZeroCondition {
+        IsZero = ARM64Assembler::ConditionEQ,
+        IsNonZero = ARM64Assembler::ConditionNE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARM64Assembler::ConditionEQ,
+        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+        DoubleGreaterThan = ARM64Assembler::ConditionGT,
+        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
+        DoubleLessThan = ARM64Assembler::ConditionLO,
+        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
+        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
+        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
+        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
+        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
+    };
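+    // Note: the two conditions flagged "Not the right flag!" above (DoubleNotEqual and
+    // DoubleEqualOrUnordered) have no single ARM64 condition code; ConditionVC/VS are
+    // used as sentinel values, and the floating-point compare/branch helpers are
+    // expected to detect them and emit an extra unordered (NaN) check.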
+
+    static const RegisterID stackPointerRegister = ARM64Registers::sp;
+    static const RegisterID framePointerRegister = ARM64Registers::fp;
+    static const RegisterID linkRegister = ARM64Registers::lr;
+
+    // FIXME: Get reasonable implementations for these
+    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+
+    // Integer operations:
+
+    void add32(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
+        m_assembler.add<32>(dest, a, b);
+    }
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.add<32>(dest, dest, src);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        add32(imm, dest, dest);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value))
+            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
+        else if (src != dest) {
+            move(imm, dest);
+            add32(src, dest);
+        } else {
+            move(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.add<32>(dest, src, dataTempRegister);
+        }
+    }
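+    // A note on the pattern above (and in the other immediate-form helpers): ARM64
+    // ADD/SUB (immediate) encode an unsigned 12-bit value, hence the isUInt12() checks.
+    // Small positive immediates use add, small negative ones become a sub of the
+    // negation, and anything else is first materialized in a temp register.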
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value))
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+        else {
+            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, getCachedDataTempRegisterIDAndInvalidate());
+        add32(dataTempRegister, dest);
+    }
+
+    void add64(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
+        if (b == ARM64Registers::sp)
+            std::swap(a, b);
+        m_assembler.add<64>(dest, a, b);
+    }
+
+    void add64(RegisterID src, RegisterID dest)
+    {
+        if (src == ARM64Registers::sp)
+            m_assembler.add<64>(dest, src, dest);
+        else
+            m_assembler.add<64>(dest, dest, src);
+    }
+
+    void add64(TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void add64(TrustedImm64 imm, RegisterID dest)
+    {
+        intptr_t immediate = imm.m_value;
+
+        if (isUInt12(immediate)) {
+            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
+            return;
+        }
+        if (isUInt12(-immediate)) {
+            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, src, dataTempRegister);
+    }
+
+    void add64(TrustedImm32 imm, Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value))
+            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+        else {
+            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        }
+
+        store64(dataTempRegister, address);
+    }
+
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store64(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store64(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        store64(dataTempRegister, address.m_ptr);
+    }
+
+    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add64(imm, srcDest);
+    }
+
+    void add64(Address src, RegisterID dest)
+    {
+        load64(src, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void add64(AbsoluteAddress src, RegisterID dest)
+    {
+        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(dest, dest, dataTempRegister);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        and32(dest, src, dest);
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.and_<32>(dest, op1, op2);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        and32(imm, dest, dest);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<32>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<32>(dest, src, dataTempRegister);
+    }
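+    // ARM64 logical instructions (AND/ORR/EOR) can only encode "bitmask immediate"
+    // patterns - a rotated run of contiguous set bits - which is what
+    // LogicalImmediate::create32/create64 check for. For example, 0x0000fff0 is
+    // encodable, while 0x00012345 is not and must go through dataTempRegister.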
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        and32(dataTempRegister, dest);
+    }
+
+    void and64(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        m_assembler.and_<64>(dest, src1, src2);
+    }
+
+    void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<64>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<64>(dest, src, dataTempRegister);
+    }
+
+    void and64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.and_<64>(dest, dest, src);
+    }
+
+    void and64(TrustedImm32 imm, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int32_t>(imm.m_value)));
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<64>(dest, dest, logicalImm);
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<64>(dest, dest, dataTempRegister);
+    }
+
+    void and64(TrustedImmPtr imm, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));
+
+        if (logicalImm.isValid()) {
+            m_assembler.and_<64>(dest, dest, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.and_<64>(dest, dest, dataTempRegister);
+    }
+    
+    void countLeadingZeros32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.clz<32>(dest, src);
+    }
+
+    void countLeadingZeros64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.clz<64>(dest, src);
+    }
+
+    void countTrailingZeros32(RegisterID src, RegisterID dest)
+    {
+        // ARM64 does not have a count-trailing-zeros instruction, only count leading
+        // zeros, so reverse the bits and count leading zeros instead.
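+        // E.g. ctz(0x10): rbit moves bit 4 to bit 27, and clz of that result is 4.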
+        m_assembler.rbit<32>(dest, src);
+        m_assembler.clz<32>(dest, dest);
+    }
+
+    void countTrailingZeros64(RegisterID src, RegisterID dest)
+    {
+        // ARM64 does not have a count-trailing-zeros instruction, only count leading
+        // zeros, so reverse the bits and count leading zeros instead.
+        m_assembler.rbit<64>(dest, src);
+        m_assembler.clz<64>(dest, dest);
+    }
+
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        m_assembler.illegalInstruction();
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsl<32>(dest, src, shiftAmount);
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
+    }
+
+    void lshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        lshift32(dest, shiftAmount, dest);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        lshift32(dest, imm, dest);
+    }
+
+    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsl<64>(dest, src, shiftAmount);
+    }
+
+    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
+    }
+
+    void lshift64(RegisterID shiftAmount, RegisterID dest)
+    {
+        lshift64(dest, shiftAmount, dest);
+    }
+
+    void lshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        lshift64(dest, imm, dest);
+    }
+
+    void mul32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.mul<32>(dest, left, right);
+    }
+    
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mul<32>(dest, dest, src);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.mul<32>(dest, src, dataTempRegister);
+    }
+
+    void mul64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mul<64>(dest, dest, src);
+    }
+
+    void mul64(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.mul<64>(dest, left, right);
+    }
+
+    void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
+    {
+        m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
+    }
+
+    void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
+    {
+        m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
+    }
+
+    void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
+    {
+        m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
+    }
+
+    void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
+    {
+        m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
+    }
+
+    void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
+    {
+        m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
+    }
+
+    void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
+    {
+        m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
+    }
+
+    void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.sdiv<32>(dest, dividend, divisor);
+    }
+
+    void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.sdiv<64>(dest, dividend, divisor);
+    }
+
+    void uDiv32(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.udiv<32>(dest, dividend, divisor);
+    }
+
+    void uDiv64(RegisterID dividend, RegisterID divisor, RegisterID dest)
+    {
+        m_assembler.udiv<64>(dest, dividend, divisor);
+    }
+
+    void neg32(RegisterID dest)
+    {
+        m_assembler.neg<32>(dest, dest);
+    }
+
+    void neg64(RegisterID dest)
+    {
+        m_assembler.neg<64>(dest, dest);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        or32(dest, src, dest);
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orr<32>(dest, op1, op2);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        or32(imm, dest, dest);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<32>(dest, src, logicalImm);
+            return;
+        }
+
+        ASSERT(src != dataTempRegister);
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<32>(dest, src, dataTempRegister);
+    }
+
+    void or32(RegisterID src, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+        if (logicalImm.isValid()) {
+            load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
+            store32(dataTempRegister, address.m_ptr);
+        } else {
+            load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+            or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
+            store32(dataTempRegister, address.m_ptr);
+        }
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        or32(imm, dataTempRegister, dataTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+    void or64(RegisterID src, RegisterID dest)
+    {
+        or64(dest, src, dest);
+    }
+
+    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orr<64>(dest, op1, op2);
+    }
+
+    void or64(TrustedImm32 imm, RegisterID dest)
+    {
+        or64(imm, dest, dest);
+    }
+
+    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int32_t>(imm.m_value)));
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<64>(dest, src, logicalImm);
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<64>(dest, src, dataTempRegister);
+    }
+
+    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<64>(dest, src, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<64>(dest, src, dataTempRegister);
+    }
+
+    void or64(TrustedImm64 imm, RegisterID dest)
+    {
+        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
+
+        if (logicalImm.isValid()) {
+            m_assembler.orr<64>(dest, dest, logicalImm);
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.orr<64>(dest, dest, dataTempRegister);
+    }
+
+    void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.ror<32>(dest, src, imm.m_value & 31);
+    }
+
+    void rotateRight32(TrustedImm32 imm, RegisterID srcDst)
+    {
+        rotateRight32(srcDst, imm, srcDst);
+    }
+
+    void rotateRight32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.ror<32>(dest, src, shiftAmount);
+    }
+
+    void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.ror<64>(dest, src, imm.m_value & 63);
+    }
+
+    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+    {
+        rotateRight64(srcDst, imm, srcDst);
+    }
+
+    void rotateRight64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.ror<64>(dest, src, shiftAmount);
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.asr<32>(dest, src, shiftAmount);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
+    }
+
+    void rshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        rshift32(dest, shiftAmount, dest);
+    }
+    
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        rshift32(dest, imm, dest);
+    }
+    
+    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.asr<64>(dest, src, shiftAmount);
+    }
+    
+    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
+    }
+    
+    void rshift64(RegisterID shiftAmount, RegisterID dest)
+    {
+        rshift64(dest, shiftAmount, dest);
+    }
+    
+    void rshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        rshift64(dest, imm, dest);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sub<32>(dest, dest, src);
+    }
+
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.sub<32>(dest, left, right);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.sub<32>(dest, dest, dataTempRegister);
+    }
+
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value))
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+        else if (isUInt12(-imm.m_value))
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+        else {
+            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+            return;
+        }
+
+        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, getCachedDataTempRegisterIDAndInvalidate());
+        sub32(dataTempRegister, dest);
+    }
+
+    void sub64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sub<64>(dest, dest, src);
+    }
+
+    void sub64(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        m_assembler.sub<64>(dest, a, b);
+    }
+    
+    void sub64(TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
+            return;
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
+            return;
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.sub<64>(dest, dest, dataTempRegister);
+    }
+    
+    void sub64(TrustedImm64 imm, RegisterID dest)
+    {
+        intptr_t immediate = imm.m_value;
+
+        if (isUInt12(immediate)) {
+            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
+            return;
+        }
+        if (isUInt12(-immediate)) {
+            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
+            return;
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.sub<64>(dest, dest, dataTempRegister);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsr<32>(dest, src, shiftAmount);
+    }
+    
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
+    }
+
+    void urshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift32(dest, shiftAmount, dest);
+    }
+    
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        urshift32(dest, imm, dest);
+    }
+
+    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.lsr<64>(dest, src, shiftAmount);
+    }
+    
+    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
+    }
+
+    void urshift64(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift64(dest, shiftAmount, dest);
+    }
+    
+    void urshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        urshift64(dest, imm, dest);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        xor32(dest, src, dest);
+    }
+
+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.eor<32>(dest, op1, op2);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID dest)
+    {
+        xor32(imm, dest, dest);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn<32>(dest, src);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.eor<32>(dest, src, logicalImm);
+                return;
+            }
+
+            move(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.eor<32>(dest, src, dataTempRegister);
+        }
+    }
+
+    void xor64(RegisterID src, Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
+        store64(dataTempRegister, address);
+    }
+
+    void xor64(RegisterID src, RegisterID dest)
+    {
+        xor64(dest, src, dest);
+    }
+
+    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.eor<64>(dest, op1, op2);
+    }
+
+    void xor64(TrustedImm32 imm, RegisterID dest)
+    {
+        xor64(imm, dest, dest);
+    }
+
+    void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn<64>(dest, src);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.eor<64>(dest, src, logicalImm);
+                return;
+            }
+
+            move(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.eor<64>(dest, src, dataTempRegister);
+        }
+    }
+
+    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn<64>(dest, src);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int32_t>(imm.m_value)));
+
+            if (logicalImm.isValid()) {
+                m_assembler.eor<64>(dest, src, logicalImm);
+                return;
+            }
+
+            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.eor<64>(dest, src, dataTempRegister);
+        }
+    }
+
+    void not32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mvn<32>(dest, src);
+    }
+
+    void not64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mvn<64>(dest, src);
+    }
+
+    void not64(RegisterID srcDst)
+    {
+        m_assembler.mvn<64>(srcDst, srcDst);
+    }
+
+    // Memory access operations:
+
+    void load64(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void load64(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void load64(const void* address, RegisterID dest)
+    {
+        load<64>(address, dest);
+    }
+
+    void load64(RegisterID src, PostIndex simm, RegisterID dest)
+    {
+        m_assembler.ldr<64>(dest, src, simm);
+    }
+
+    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+    
+    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+        DataLabelCompact label(this);
+        m_assembler.ldr<64>(dest, address.base, address.offset);
+        return label;
+    }
+
+    void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        loadPair64(src, TrustedImm32(0), dest1, dest2);
+    }
+
+    void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
+    }
+
+    void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
+    }
+
+    void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), dataTempRegister);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm64(misc), memoryTempRegister);
+        abortWithReason(reason);
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result(this);
+        ASSERT(!(address.offset & ~0xff8));
+        m_assembler.ldr<64>(dest, address.base, address.offset);
+        return result;
+    }
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        load<32>(address, dest);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+    
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+        DataLabelCompact label(this);
+        m_assembler.ldr<32>(dest, address.base, address.offset);
+        return label;
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void load16(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrh(dest, address.base, memoryTempRegister);
+    }
+    
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 1)) {
+            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrh(dest, address.base, memoryTempRegister);
+    }
+    
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        load16(address, dest);
+    }
+
+    void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 1)) {
+            m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void zeroExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.uxth<32>(dest, src);
+    }
+
+    void signExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sxth<32>(dest, src);
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrb(dest, address.base, memoryTempRegister);
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && !address.scale) {
+            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrb(dest, address.base, memoryTempRegister);
+    }
+    
+    void load8(const void* address, RegisterID dest)
+    {
+        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
+        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
+        if (dest == memoryTempRegister)
+            cachedMemoryTempRegister().invalidate();
+    }
+
+    void load8(RegisterID src, PostIndex simm, RegisterID dest)
+    {
+        m_assembler.ldrb(dest, src, simm);
+    }
+
+    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+    {
+        if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        if (!address.offset && !address.scale) {
+            m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void load8SignedExtendTo32(const void* address, RegisterID dest)
+    {
+        moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
+        m_assembler.ldrsb<32>(dest, memoryTempRegister, ARM64Registers::zr);
+        if (dest == memoryTempRegister)
+            cachedMemoryTempRegister().invalidate();
+    }
+
+    void zeroExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.uxtb<32>(dest, src);
+    }
+
+    void signExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sxtb<32>(dest, src);
+    }
+
+    void store64(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<64>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+
+    void store64(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+    
+    void store64(RegisterID src, const void* address)
+    {
+        store<64>(src, address);
+    }
+
+    void store64(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store64(TrustedImm64(imm.m_value), address);
+    }
+
+    void store64(TrustedImm64 imm, ImplicitAddress address)
+    {
+        if (!imm.m_value) {
+            store64(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, dataMemoryTempRegister());
+        store64(dataTempRegister, address);
+    }
+
+    void store64(TrustedImm64 imm, BaseIndex address)
+    {
+        if (!imm.m_value) {
+            store64(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, dataMemoryTempRegister());
+        store64(dataTempRegister, address);
+    }
+
+    void store64(RegisterID src, RegisterID dest, PostIndex simm)
+    {
+        m_assembler.str<64>(src, dest, simm);
+    }
+    
+    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+
+    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        storePair64(src1, src2, dest, TrustedImm32(0));
+    }
+
+    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
+    {
+        m_assembler.stp<64>(src1, src2, dest, offset.m_value);
+    }
+
+    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
+    }
+
+    void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
+    {
+        m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<32>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+
+    void store32(RegisterID src, const void* address)
+    {
+        store<32>(src, address);
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        if (!imm.m_value) {
+            store32(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, dataMemoryTempRegister());
+        store32(dataTempRegister, address);
+    }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        if (!imm.m_value) {
+            store32(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, dataMemoryTempRegister());
+        store32(dataTempRegister, address);
+    }
+
+    void store32(TrustedImm32 imm, const void* address)
+    {
+        if (!imm.m_value) {
+            store32(ARM64Registers::zr, address);
+            return;
+        }
+
+        moveToCachedReg(imm, dataMemoryTempRegister());
+        store32(dataTempRegister, address);
+    }
+
+    void storeZero32(ImplicitAddress address)
+    {
+        store32(ARM64Registers::zr, address);
+    }
+
+    void storeZero32(BaseIndex address)
+    {
+        store32(ARM64Registers::zr, address);
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 label(this);
+        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
+        return label;
+    }
+
+    void store16(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<16>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.strh(src, address.base, memoryTempRegister);
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 1)) {
+            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.strh(src, address.base, memoryTempRegister);
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+        if (!address.offset && !address.scale) {
+            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.strb(src, address.base, memoryTempRegister);
+    }
+
+    void store8(RegisterID src, void* address)
+    {
+        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.strb(src, memoryTempRegister, 0);
+    }
+
+    void store8(RegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<8>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.strb(src, address.base, memoryTempRegister);
+    }
+
+    void store8(TrustedImm32 imm, void* address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (!imm8.m_value) {
+            store8(ARM64Registers::zr, address);
+            return;
+        }
+
+        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
+        store8(dataTempRegister, address);
+    }
+
+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (!imm8.m_value) {
+            store8(ARM64Registers::zr, address);
+            return;
+        }
+
+        move(imm8, getCachedDataTempRegisterIDAndInvalidate());
+        store8(dataTempRegister, address);
+    }
+
+    void store8(RegisterID src, RegisterID dest, PostIndex simm)
+    {
+        m_assembler.strb(src, dest, simm);
+    }
+
+    // Floating-point operations:
+
+    static bool supportsFloatingPoint() { return true; }
+    static bool supportsFloatingPointTruncate() { return true; }
+    static bool supportsFloatingPointSqrt() { return true; }
+    static bool supportsFloatingPointAbs() { return true; }
+    static bool supportsFloatingPointRounding() { return true; }
+
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+
+    void absDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fabs<64>(dest, src);
+    }
+
+    void absFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fabs<32>(dest, src);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        addDouble(dest, src, dest);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fadd<64>(dest, op1, op2);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        addDouble(fpTempRegister, dest);
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+        addDouble(fpTempRegister, dest);
+    }
+
+    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fadd<32>(dest, op1, op2);
+    }
+
+    void ceilDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintp<64>(dest, src);
+    }
+
+    void ceilFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintp<32>(dest, src);
+    }
+
+    void floorDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintm<64>(dest, src);
+    }
+
+    void floorFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintm<32>(dest, src);
+    }
+
+    void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintn<64>(dest, src);
+    }
+
+    void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintn<32>(dest, src);
+    }
+
+    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintz<64>(dest, src);
+    }
+
+    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.frintz<32>(dest, src);
+    }
+
+
+    // Convert 'src' to an integer, and place the resulting value in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+    {
+        m_assembler.fcvtns<32, 64>(dest, src);
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+        // Test for negative zero.
+        if (negZeroCheck) {
+            Jump valueIsNonZero = branchTest32(NonZero, dest);
+            RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
+            m_assembler.fmov<64>(scratch, src);
+            failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
+            valueIsNonZero.link(this);
+        }
+    }
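+
+    // A hypothetical usage sketch for the conversion helper above (the names
+    // 'jit', 'srcFPR', 'destGPR' and 'scratchFPR' are illustrative assumptions,
+    // not part of this interface):
+    //     JumpList failureCases;
+    //     jit.branchConvertDoubleToInt32(srcFPR, destGPR, failureCases, scratchFPR);
+    //     ... fast path using the int32 now held in destGPR ...
+    //     failureCases.link(&jit); // value was not representable (or was -0)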
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.fcmp<64>(left, right);
+        return jumpAfterFloatingPointCompare(cond);
+    }
+
+    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.fcmp<32>(left, right);
+        return jumpAfterFloatingPointCompare(cond);
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.fcmp_0<64>(reg);
+        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+        Jump result = makeBranch(ARM64Assembler::ConditionNE);
+        unordered.link(this);
+        return result;
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.fcmp_0<64>(reg);
+        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+        unordered.link(this);
+        // We get here if either unordered or equal.
+        Jump result = jump();
+        notEqual.link(this);
+        return result;
+    }
+
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
+        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
+        zeroExtend32ToPtr(dataTempRegister, dest);
+        // Check the low 32-bits sign extend to be equal to the full value.
+        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
+        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
+    }
+
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fcvt<32, 64>(dest, src);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fcvt<64, 32>(dest, src);
+    }
+    
+    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        convertInt32ToDouble(dataTempRegister, dest);
+    }
+    
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<64, 32>(dest, src);
+    }
+
+    void convertInt32ToDouble(Address address, FPRegisterID dest)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        convertInt32ToDouble(dataTempRegister, dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        convertInt32ToDouble(dataTempRegister, dest);
+    }
+
+    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<32, 32>(dest, src);
+    }
+    
+    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<64, 64>(dest, src);
+    }
+
+    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.scvtf<32, 64>(dest, src);
+    }
+
+    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.ucvtf<64, 64>(dest, src);
+    }
+
+    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.ucvtf<32, 64>(dest, src);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        divDouble(dest, src, dest);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fdiv<64>(dest, op1, op2);
+    }
+
+    void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fdiv<32>(dest, op1, op2);
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
+    }
+    
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+        moveToCachedReg(address, cachedMemoryTempRegister());
+        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    void loadFloat(ImplicitAddress address, FPRegisterID dest)
+    {
+        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+    }
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmov<64>(dest, src);
+    }
+
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        m_assembler.fmov<64>(reg, ARM64Registers::zr);
+    }
+
+    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fmov<64>(dest, src);
+    }
+
+    void moveFloatTo32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fmov<32>(dest, src);
+    }
+
+    void move64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmov<64>(dest, src);
+    }
+
+    void move32ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmov<32>(dest, src);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.fcmp<64>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.fcmp<64>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.fcmp<32>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.fcmp<32>(left, right);
+        moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
+    template<int datasize>
+    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
+    {
+        if (cond == DoubleNotEqual) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            // If the compare is unordered, src is copied to dest and the
+            // next csel has all arguments equal to src.
+            // If the compare is ordered, dest is unchanged and EQ decides
+            // what value to set.
+            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
+            return;
+        }
+        m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    template<int datasize>
+    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (cond == DoubleNotEqual) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            // If the compare is unordered, thenCase is copied to elseCase and the
+            // next csel has all arguments equal to thenCase.
+            // If the compare is ordered, dest is unchanged and EQ decides
+            // what value to set.
+            m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
+            m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
+            return;
+        }
+        m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    template<int datasize>
+    void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (cond == DoubleNotEqual) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            // If the compare is unordered, thenCase is copied to elseCase and the
+            // next csel has all arguments equal to thenCase.
+            // If the compare is ordered, dest is unchanged and EQ decides
+            // what value to set.
+            m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
+            m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
+            return;
+        }
+        m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.fcmp<64>(left, right);
+        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
+    void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.fcmp<32>(left, right);
+        moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        mulDouble(dest, src, dest);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fmul<64>(dest, op1, op2);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        mulDouble(fpTempRegister, dest);
+    }
+
+    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fmul<32>(dest, op1, op2);
+    }
+
+    void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vand<64>(dest, op1, op2);
+    }
+
+    void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        andDouble(op1, op2, dest);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fneg<64>(dest, src);
+    }
+
+    void negateFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fneg<32>(dest, src);
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fsqrt<64>(dest, src);
+    }
+
+    void sqrtFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fsqrt<32>(dest, src);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<64>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
+    {
+        moveToCachedReg(address, cachedMemoryTempRegister());
+        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 3)) {
+            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<64>(src, address.base, memoryTempRegister);
+    }
+
+    void storeFloat(FPRegisterID src, ImplicitAddress address)
+    {
+        if (tryStoreWithOffset<32>(src, address.base, address.offset))
+            return;
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+    
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        if (!address.offset && (!address.scale || address.scale == 2)) {
+            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+            return;
+        }
+
+        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
+        m_assembler.str<32>(src, address.base, memoryTempRegister);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        subDouble(dest, src, dest);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fsub<64>(dest, op1, op2);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        subDouble(fpTempRegister, dest);
+    }
+
+    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.fsub<32>(dest, op1, op2);
+    }
+
+    // Result is undefined if the value is outside of the integer range.
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<32, 64>(dest, src);
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<32, 64>(dest, src);
+    }
+
+    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<64, 64>(dest, src);
+    }
+
+    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
+    {
+        truncateDoubleToUint64(src, dest);
+    }
+
+    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<64, 64>(dest, src);
+    }
+
+    void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<32, 32>(dest, src);
+    }
+
+    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<32, 32>(dest, src);
+    }
+
+    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzs<64, 32>(dest, src);
+    }
+
+    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
+    {
+        truncateFloatToUint64(src, dest);
+    }
+
+    void truncateFloatToUint64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.fcvtzu<64, 32>(dest, src);
+    }
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data. Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack. These operations are not supported on
+    // ARM64. Peek and poke operations read or write values on the
+    // stack, without moving the current stack position. Additionally,
+    // there are popToRestore and pushToSave operations, which are
+    // designed just for quick-and-dirty saving and restoring of
+    // temporary values. These operations don't claim to have any
+    // ABI compatibility.
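+    //
+    // A minimal usage sketch (illustrative only; 'jit' and the chosen registers
+    // are assumptions, not part of this interface):
+    //     jit.pushPair(ARM64Registers::x0, ARM64Registers::x1); // reserve a 16-byte slot
+    //     ... code that clobbers x0 and x1 ...
+    //     jit.popPair(ARM64Registers::x0, ARM64Registers::x1);  // restore them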
+    
+    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void push(Address) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
+    {
+        CRASH();
+    }
+
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
+    }
+
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
+    }
+
+    void popToRestore(RegisterID dest)
+    {
+        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
+    }
+
+    void pushToSave(RegisterID src)
+    {
+        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
+    }
+    
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+    {
+        // Reserve a 16-byte slot by pushing dataTempRegister twice, overwrite the
+        // low 8 bytes of the slot with the immediate, then reload the register's
+        // original value from the high 8 bytes so no register is left modified.
+        RegisterID reg = dataTempRegister;
+        pushPair(reg, reg);
+        move(imm, reg);
+        store64(reg, stackPointerRegister);
+        load64(Address(stackPointerRegister, 8), reg);
+    }
+
+    void pushToSave(Address address)
+    {
+        load32(address, getCachedDataTempRegisterIDAndInvalidate());
+        pushToSave(dataTempRegister);
+    }
+
+    void pushToSave(TrustedImm32 imm)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        pushToSave(dataTempRegister);
+    }
+    
+    void popToRestore(FPRegisterID dest)
+    {
+        loadDouble(stackPointerRegister, dest);
+        add64(TrustedImm32(16), stackPointerRegister);
+    }
+    
+    void pushToSave(FPRegisterID src)
+    {
+        sub64(TrustedImm32(16), stackPointerRegister);
+        storeDouble(src, stackPointerRegister);
+    }
+
+    static ptrdiff_t pushToSaveByteOffset() { return 16; }
+
+    // Register move operations:
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.mov<64>(dest, src);
+    }
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        moveInternal(imm, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        moveInternal(imm, dest);
+    }
+
+    void move(TrustedImm64 imm, RegisterID dest)
+    {
+        moveInternal(imm, dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
+        move(reg2, reg1);
+        move(dataTempRegister, reg2);
+    }
+
+    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
+    }
+    
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sxtw(dest, src);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.uxtw(dest, src);
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<64>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<64>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.tst<32>(testReg, mask);
+        m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.tst<32>(left, right);
+        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        test32(left, right);
+        m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.tst<64>(testReg, mask);
+        m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.tst<64>(left, right);
+        m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.fcsel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<64>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<64>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.tst<32>(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        test32(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        m_assembler.tst<64>(left, right);
+        m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+    }
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
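+    //
+    // A hedged example of how a returned Jump is typically consumed (the names
+    // 'jit' and 'regT0' are illustrative assumptions):
+    //     Jump notEqual = jit.branch32(NotEqual, regT0, TrustedImm32(42));
+    //     ... fast path taken when regT0 == 42 ...
+    //     notEqual.link(&jit); // bind the branch target to the current position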
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmp<32>(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest32(*resultCondition, left, left);
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, left, memoryTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        if (right == ARM64Registers::sp) {
+            if (cond == Equal && left != ARM64Registers::sp) {
+                // CMP can only use SP as the left argument; since we are testing
+                // for equality, the order does not matter here.
+                std::swap(left, right);
+            } else {
+                move(right, getCachedDataTempRegisterIDAndInvalidate());
+                right = dataTempRegister;
+            }
+        }
+        m_assembler.cmp<64>(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest64(*resultCondition, left, left);
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<64>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<64>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+    {
+        intptr_t immediate = right.m_value;
+        if (!immediate) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest64(*resultCondition, left, left);
+        }
+
+        if (isUInt12(immediate))
+            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
+        else if (isUInt12(-immediate))
+            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
+        else {
+            moveToCachedReg(right, dataMemoryTempRegister());
+            m_assembler.cmp<64>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, left, memoryTempRegister);
+    }
+
+    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branch64(cond, dataTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+    {
+        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch64(cond, memoryTempRegister, right);
+    }
+
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right8);
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right8);
+    }
+    
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right8);
+    }
+    
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        if (reg == mask && (cond == Zero || cond == NonZero))
+            return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
+        m_assembler.tst<32>(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.tst<32>(reg, reg);
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+            if (logicalImm.isValid())
+                m_assembler.tst<32>(reg, logicalImm);
+            else {
+                move(mask, getCachedDataTempRegisterIDAndInvalidate());
+                m_assembler.tst<32>(reg, dataTempRegister);
+            }
+        }
+    }
+
+    Jump branch(ResultCondition cond)
+    {
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1) {
+            if ((cond == Zero) || (cond == NonZero))
+                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
+            m_assembler.tst<32>(reg, reg);
+        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+            if (logicalImm.isValid()) {
+                m_assembler.tst<32>(reg, logicalImm);
+                return Jump(makeBranch(cond));
+            }
+
+            move(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<32>(reg, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branchTest32(cond, memoryTempRegister, mask);
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branchTest32(cond, memoryTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        if (reg == mask && (cond == Zero || cond == NonZero))
+            return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+        m_assembler.tst<64>(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1) {
+            if ((cond == Zero) || (cond == NonZero))
+                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+            m_assembler.tst<64>(reg, reg);
+        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.tst<64>(reg, logicalImm);
+                return Jump(makeBranch(cond));
+            }
+
+            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<64>(reg, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
+    {
+        if (mask.m_value == -1) {
+            if ((cond == Zero) || (cond == NonZero))
+                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+            m_assembler.tst<64>(reg, reg);
+        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+        else {
+            LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+
+            if (logicalImm.isValid()) {
+                m_assembler.tst<64>(reg, logicalImm);
+                return Jump(makeBranch(cond));
+            }
+
+            move(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<64>(reg, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest64(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
+
+        if (MacroAssemblerHelpers::isUnsigned<MacroAssemblerARM64>(cond))
+            m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
+        else
+            m_assembler.ldrsb<32>(dataTempRegister, address.base, dataTempRegister);
+
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        return branch32(cond, left, right);
+    }
+
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation. The operation
+    // is performed as normal, storing the result.
+    //
+    // * jz operations branch if the result is zero.
+    // * jo operations branch if the (signed) arithmetic
+    //   operation caused an overflow to occur.
+    
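+    // Illustrative usage (a minimal sketch; `masm`, `srcReg`, `destReg` and the slow-path
+    // handling are assumptions for the example, not part of this interface):
+    //
+    //     Jump overflowed = masm.branchAdd32(Overflow, srcReg, destReg); // destReg += srcReg
+    //     // ... fast path continues here ...
+    //     overflowed.link(&masm); // emit the signed-overflow handler at this point
+    //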
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.add<32, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchAdd32(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        load32(src, getCachedDataTempRegisterIDAndInvalidate());
+        return branchAdd32(cond, dest, dataTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd32(cond, dest, src, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd32(cond, dest, imm, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+        } else if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
+            store32(dataTempRegister, address.m_ptr);
+        } else {
+            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
+            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
+            store32(dataTempRegister, address.m_ptr);
+        }
+
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.add<64, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchAdd64(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd64(cond, dest, src, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd64(cond, dest, imm, dest);
+    }
+
+    Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT(isUInt12(imm.m_value));
+        m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
+    {
+        ASSERT(cond != Signed);
+
+        if (cond != Overflow) {
+            m_assembler.mul<32>(dest, src1, src2);
+            return branchTest32(cond, dest);
+        }
+
+        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
+        m_assembler.smull(dest, src1, src2);
+        // Copy bits 63..32 of the result to bits 31..0 of scratch1.
+        m_assembler.asr<64>(scratch1, dest, 32);
+        // Splat bit 31 of the result to bits 31..0 of scratch2.
+        m_assembler.asr<32>(scratch2, dest, 31);
+        // After a mul32 the top 32 bits of the register should be clear.
+        zeroExtend32ToPtr(dest, dest);
+        // Check that bits 31..63 of the original result were all equal.
+        return branch32(NotEqual, scratch2, scratch1);
+    }
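+    // Worked example for the overflow check above (illustrative): for src1 = src2 = 0x10000,
+    // smull yields 0x0000000100000000, so scratch1 (bits 63..32) is 1 while scratch2 (the splat
+    // of bit 31 of the low word) is 0; they differ, so the Overflow branch is taken.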
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchMul32(cond, dest, src, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchMul32(cond, dataTempRegister, src, dest);
+    }
+
+    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
+    {
+        ASSERT(cond != Signed);
+
+        // This is a signed multiply of two 64-bit values, producing a 64-bit result.
+        m_assembler.mul<64>(dest, src1, src2);
+
+        if (cond != Overflow)
+            return branchTest64(cond, dest);
+
+        // Compute bits 127..64 of the result into scratch1.
+        m_assembler.smulh(scratch1, src1, src2);
+        // Splat bit 63 of the result to bits 63..0 of scratch2.
+        m_assembler.asr<64>(scratch2, dest, 63);
+        // Check that bits 63..127 of the full result were all equal.
+        return branch64(NotEqual, scratch2, scratch1);
+    }
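+    // Analogous reasoning for the 64-bit case (illustrative): the product fits in a signed
+    // 64-bit value exactly when smulh (bits 127..64) equals the sign-splat of bit 63 of the
+    // low half, e.g. for -1 * 1 both values are all-ones, so no overflow is reported.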
+
+    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
+    }
+
+    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchMul64(cond, dest, src, dest);
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID dest)
+    {
+        m_assembler.neg<32, S>(dest, dest);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
+    {
+        m_assembler.neg<64, S>(srcDest, srcDest);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID dest)
+    {
+        m_assembler.neg<32, S>(dest, dest);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.sub<32, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchSub32(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub32(cond, dest, src, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub32(cond, dest, imm, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.sub<64, S>(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        if (isUInt12(imm.m_value)) {
+            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+        if (isUInt12(-imm.m_value)) {
+            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
+            return Jump(makeBranch(cond));
+        }
+
+        move(imm, getCachedDataTempRegisterIDAndInvalidate());
+        return branchSub64(cond, op1, dataTempRegister, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub64(cond, dest, src, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub64(cond, dest, imm, dest);
+    }
+
+    Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT(isUInt12(imm.m_value));
+        m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
+        return Jump(makeBranch(cond));
+    }
+
+
+    // Jumps, calls, returns
+
+    ALWAYS_INLINE Call call()
+    {
+        AssemblerLabel pointerLabel = m_assembler.label();
+        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
+        invalidateAllTempRegisters();
+        m_assembler.blr(dataTempRegister);
+        AssemblerLabel callLabel = m_assembler.label();
+        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
+        return Call(callLabel, Call::Linkable);
+    }
+
+    ALWAYS_INLINE Call call(RegisterID target)
+    {
+        invalidateAllTempRegisters();
+        m_assembler.blr(target);
+        return Call(m_assembler.label(), Call::None);
+    }
+
+    ALWAYS_INLINE Call call(Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        return call(dataTempRegister);
+    }
+
+    ALWAYS_INLINE Jump jump()
+    {
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.b();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.br(target);
+    }
+
+    void jump(Address address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.br(dataTempRegister);
+    }
+    
+    void jump(BaseIndex address)
+    {
+        load64(address, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.br(dataTempRegister);
+    }
+
+    void jump(AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
+        load64(Address(dataTempRegister), dataTempRegister);
+        m_assembler.br(dataTempRegister);
+    }
+
+    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        return tailRecursiveCall();
+    }
+
+    ALWAYS_INLINE Call nearCall()
+    {
+        m_assembler.bl();
+        return Call(m_assembler.label(), Call::LinkableNear);
+    }
+
+    ALWAYS_INLINE Call nearTailCall()
+    {
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.b();
+        return Call(label, Call::LinkableNearTail);
+    }
+
+    ALWAYS_INLINE void ret()
+    {
+        m_assembler.ret();
+    }
+
+    ALWAYS_INLINE Call tailRecursiveCall()
+    {
+        // Like a normal call, but don't link.
+        AssemblerLabel pointerLabel = m_assembler.label();
+        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.br(dataTempRegister);
+        AssemblerLabel callLabel = m_assembler.label();
+        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
+        return Call(callLabel, Call::Linkable);
+    }
+
+
+    // Comparison operations
+
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp<32>(left, right);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        load32(left, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.cmp<32>(dataTempRegister, right);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test32(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            move(right, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp<64>(left, right);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+    
+    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test64(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
+        m_assembler.cmp<64>(left, dataTempRegister);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+        move(right8, getCachedDataTempRegisterIDAndInvalidate());
+        compare32(cond, memoryTempRegister, dataTempRegister, dest);
+    }
+
+    void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.tst<32>(src, mask);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
+    {
+        test32(src, mask);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+        test32(cond, memoryTempRegister, mask, dest);
+    }
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedMemoryTempRegisterIDAndInvalidate());
+        test32(cond, memoryTempRegister, mask8, dest);
+    }
+
+    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.tst<64>(op1, op2);
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.tst<64>(src, src);
+        else {
+            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
+            m_assembler.tst<64>(src, dataTempRegister);
+        }
+        m_assembler.cset<32>(dest, ARM64Condition(cond));
+    }
+
+    void setCarry(RegisterID dest)
+    {
+        m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
+    }
+
+    // Patchable operations
+
+    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
+    {
+        DataLabel32 label(this);
+        moveWithFixedWidth(imm, dest);
+        return label;
+    }
+
+    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
+    {
+        DataLabelPtr label(this);
+        moveWithFixedWidth(imm, dest);
+        return label;
+    }
+
+    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = DataLabelPtr(this);
+        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+        return branch64(cond, left, dataTempRegister);
+    }
+
+    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = DataLabelPtr(this);
+        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+        return branch64(cond, left, dataTempRegister);
+    }
+
+    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        dataLabel = DataLabel32(this);
+        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch64(cond, left, TrustedImm64(right));
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branchTest32(cond, reg, mask);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, reg, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, left, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch64(cond, reg, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch64(cond, left, right);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableJump()
+    {
+        m_makeJumpPatchable = true;
+        Jump result = jump();
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr label(this);
+        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
+        store64(dataTempRegister, address);
+        return label;
+    }
+
+    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+    {
+        return storePtrWithPatch(TrustedImmPtr(0), address);
+    }
+
+    static void reemitInitialMoveWithPatch(void* address, void* value)
+    {
+        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
+    }
+
+    // Miscellaneous operations:
+
+    void breakpoint(uint16_t imm = 0)
+    {
+        m_assembler.brk(imm);
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+    
+    // We take memoryFence to mean acqrel. This has acqrel semantics on ARM64.
+    void memoryFence()
+    {
+        m_assembler.dmbISH();
+    }
+
+    // We take this to mean that it prevents motion of normal stores. That's a store fence on ARM64 (hence the "ST").
+    void storeFence()
+    {
+        m_assembler.dmbISHST();
+    }
+
+    // We take this to mean that it prevents motion of normal loads. Ideally we'd have expressed this
+    // using dependencies or half fences, but there are cases where this is as good as it gets. The only
+    // way to get a standalone load fence instruction on ARM is to use the ISH fence, which is just like
+    // the memoryFence().
+    void loadFence()
+    {
+        m_assembler.dmbISH();
+    }
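+    // Illustrative publish pattern using these fences (a sketch only; `payloadReg`,
+    // `payloadAddr` and `flagAddr` are assumed names, not part of this interface):
+    //
+    //     masm.store32(payloadReg, payloadAddr);
+    //     masm.storeFence();                       // dmb ishst: payload visible before the flag
+    //     masm.store32(TrustedImm32(1), flagAddr);
+    //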
+
+    // Misc helper functions.
+
+    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
+    }
+
+    static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+            return Zero;
+        case NotEqual:
+            return NonZero;
+        case LessThan:
+            return Signed;
+        case GreaterThanOrEqual:
+            return PositiveOrZero;
+        default:
+            return std::nullopt;
+        }
+    }
+
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
+    }
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return ARM64Assembler::maxJumpReplacementSize();
+    }
+
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARM64Assembler::patchableJumpSize();
+    }
+
+    RegisterID scratchRegisterForBlinding()
+    {
+        // We *do not* have a scratch register for blinding.
+        RELEASE_ASSERT_NOT_REACHED();
+        return getCachedDataTempRegisterIDAndInvalidate();
+    }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+    
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        return label.labelAtOffset(0);
+    }
+    
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+    {
+        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
+    }
+    
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+    }
+
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+    ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
+    {
+        m_assembler.b_cond(cond);
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.nop();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
+    }
+    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
+
+    template <int dataSize>
+    ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
+    {
+        if (cond == IsZero)
+            m_assembler.cbz<dataSize>(reg);
+        else
+            m_assembler.cbnz<dataSize>(reg);
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.nop();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
+    }
+
+    ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
+    {
+        ASSERT(bit < 64);
+        bit &= 0x3f;
+        if (cond == IsZero)
+            m_assembler.tbz(reg, bit);
+        else
+            m_assembler.tbnz(reg, bit);
+        AssemblerLabel label = m_assembler.label();
+        m_assembler.nop();
+        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
+    }
+
+    ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
+    {
+        return static_cast<ARM64Assembler::Condition>(cond);
+    }
+
+    ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
+    {
+        return static_cast<ARM64Assembler::Condition>(cond);
+    }
+
+    ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
+    {
+        return static_cast<ARM64Assembler::Condition>(cond);
+    }
+    
+private:
+    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return dataMemoryTempRegister().registerIDInvalidate();
+    }
+    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return cachedMemoryTempRegister().registerIDInvalidate();
+    }
+    ALWAYS_INLINE CachedTempRegister& dataMemoryTempRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return m_dataMemoryTempRegister;
+    }
+    ALWAYS_INLINE CachedTempRegister& cachedMemoryTempRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return m_cachedMemoryTempRegister;
+    }
+
+    ALWAYS_INLINE bool isInIntRange(intptr_t value)
+    {
+        return value == ((value << 32) >> 32);
+    }
+
+    template<typename ImmediateType, typename rawType>
+    void moveInternal(ImmediateType imm, RegisterID dest)
+    {
+        const int dataSize = sizeof(rawType) * 8;
+        const int numberHalfWords = dataSize / 16;
+        rawType value = bitwise_cast<rawType>(imm.m_value);
+        uint16_t halfword[numberHalfWords];
+
+        // Handle 0 and ~0 here to simplify code below
+        if (!value) {
+            m_assembler.movz<dataSize>(dest, 0);
+            return;
+        }
+        if (!~value) {
+            m_assembler.movn<dataSize>(dest, 0);
+            return;
+        }
+
+        LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
+
+        if (logicalImm.isValid()) {
+            m_assembler.movi<dataSize>(dest, logicalImm);
+            return;
+        }
+
+        // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
+        int zeroOrNegateVote = 0;
+        for (int i = 0; i < numberHalfWords; ++i) {
+            halfword[i] = getHalfword(value, i);
+            if (!halfword[i])
+                zeroOrNegateVote++;
+            else if (halfword[i] == 0xffff)
+                zeroOrNegateVote--;
+        }
+
+        bool needToClearRegister = true;
+        if (zeroOrNegateVote >= 0) {
+            for (int i = 0; i < numberHalfWords; i++) {
+                if (halfword[i]) {
+                    if (needToClearRegister) {
+                        m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
+                        needToClearRegister = false;
+                    } else
+                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
+                }
+            }
+        } else {
+            for (int i = 0; i < numberHalfWords; i++) {
+                if (halfword[i] != 0xffff) {
+                    if (needToClearRegister) {
+                        m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
+                        needToClearRegister = false;
+                    } else
+                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
+                }
+            }
+        }
+    }
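+    // Worked example (illustrative): for the 64-bit value 0xffffffff00001234 the halfwords are
+    // { 0x1234, 0x0000, 0xffff, 0xffff }, giving a vote of 1 - 2 = -1, so the movn path is used:
+    //     movn dest, #0xedcb            // dest = 0xffffffffffff1234
+    //     movk dest, #0x0000, lsl #16   // dest = 0xffffffff00001234
+    // i.e. two instructions instead of the three that the movz path would need here.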
+
+    template<int datasize>
+    ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        m_assembler.ldr<datasize>(rt, rn, pimm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+    {
+        m_assembler.ldur<datasize>(rt, rn, simm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        loadUnsignedImmediate<datasize>(rt, rn, pimm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+    {
+        loadUnscaledImmediate<datasize>(rt, rn, simm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+    {
+        m_assembler.str<datasize>(rt, rn, pimm);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+    {
+        m_assembler.stur<datasize>(rt, rn, simm);
+    }
+
+    void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
+    {
+        int32_t value = imm.m_value;
+        m_assembler.movz<32>(dest, getHalfword(value, 0));
+        m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+    }
+
+    void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
+    {
+        intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
+        m_assembler.movz<64>(dest, getHalfword(value, 0));
+        m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
+        m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
+    }
+
+    void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
+    {
+        if (value >= 0) {
+            m_assembler.movz<32>(dest, getHalfword(value, 0));
+            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+        } else {
+            m_assembler.movn<32>(dest, ~getHalfword(value, 0));
+            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
+        }
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void load(const void* address, RegisterID dest)
+    {
+        intptr_t currentRegisterContents;
+        if (cachedMemoryTempRegister().value(currentRegisterContents)) {
+            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
+            intptr_t addressDelta = addressAsInt - currentRegisterContents;
+
+            if (dest == memoryTempRegister)
+                cachedMemoryTempRegister().invalidate();
+
+            if (isInIntRange(addressDelta)) {
+                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
+                    m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
+                    return;
+                }
+
+                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
+                    m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
+                    return;
+                }
+            }
+
+            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
+                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
+                cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
+                m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
+                return;
+            }
+        }
+
+        move(TrustedImmPtr(address), memoryTempRegister);
+        if (dest == memoryTempRegister)
+            cachedMemoryTempRegister().invalidate();
+        else
+            cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
+        m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE void store(RegisterID src, const void* address)
+    {
+        ASSERT(src != memoryTempRegister);
+        intptr_t currentRegisterContents;
+        if (cachedMemoryTempRegister().value(currentRegisterContents)) {
+            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
+            intptr_t addressDelta = addressAsInt - currentRegisterContents;
+
+            if (isInIntRange(addressDelta)) {
+                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
+                    m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
+                    return;
+                }
+
+                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
+                    m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
+                    return;
+                }
+            }
+
+            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
+                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
+                cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
+                m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
+                return;
+            }
+        }
+
+        move(TrustedImmPtr(address), memoryTempRegister);
+        cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
+        m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
+    }
+
+    template <int dataSize>
+    ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
+    {
+        intptr_t currentRegisterContents;
+        if (dest.value(currentRegisterContents)) {
+            if (currentRegisterContents == immediate)
+                return true;
+
+            LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
+
+            if (logicalImm.isValid()) {
+                m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
+                dest.setValue(immediate);
+                return true;
+            }
+
+            if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
+                if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
+                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
+
+                if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
+                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
+
+                dest.setValue(immediate);
+                return true;
+            }
+        }
+
+        return false;
+    }
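+    // Example of the cache-hit path above (illustrative): if the temp register is known to
+    // hold 0x0000123400005678 and 0x0000123400009abc is requested, only halfword 0 differs,
+    // so a single movk #0x9abc updates it instead of rematerializing the full constant.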
+
+    void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
+    {
+        if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
+            return;
+
+        moveInternal(imm, dest.registerIDNoInvalidate());
+        dest.setValue(imm.m_value);
+    }
+
+    void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
+    {
+        if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
+            return;
+
+        moveInternal(imm, dest.registerIDNoInvalidate());
+        dest.setValue(imm.asIntptr());
+    }
+
+    void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
+    {
+        if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
+            return;
+
+        moveInternal(imm, dest.registerIDNoInvalidate());
+        dest.setValue(imm.m_value);
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            loadUnscaledImmediate<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            m_assembler.ldur<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            storeUnscaledImmediate<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    template<int datasize>
+    ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+    {
+        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+            m_assembler.stur<datasize>(rt, rn, offset);
+            return true;
+        }
+        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+            m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
+            return true;
+        }
+        return false;
+    }
+
+    Jump jumpAfterFloatingPointCompare(DoubleCondition cond)
+    {
+        if (cond == DoubleNotEqual) {
+            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            Jump result = makeBranch(ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            return result;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+            unordered.link(this);
+            // We get here if either unordered or equal.
+            Jump result = jump();
+            notEqual.link(this);
+            return result;
+        }
+        return makeBranch(cond);
+    }
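+    // Rationale (illustrative): after an fcmp with a NaN operand the flags read as "unordered"
+    // (V set), and ARM's NE condition is also true for unordered results. The extra VS branch
+    // above therefore keeps, e.g., (NaN != 1.0) from taking the DoubleNotEqual jump, while
+    // DoubleEqualOrUnordered deliberately routes the unordered case onto the taken path.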
+
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (!call.isFlagSet(Call::Near))
+            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+        else if (call.isFlagSet(Call::Tail))
+            ARM64Assembler::linkJump(code, call.m_label, function.value());
+        else
+            ARM64Assembler::linkCall(code, call.m_label, function.value());
+    }
+
+    CachedTempRegister m_dataMemoryTempRegister;
+    CachedTempRegister m_cachedMemoryTempRegister;
+    bool m_makeJumpPatchable;
+};
+
+// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrb(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrh(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrsb<64>(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.ldrsh<64>(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldurb(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldurh(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldursb<64>(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.ldursh<64>(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.strb(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+    m_assembler.strh(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.sturb(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+    m_assembler.sturh(rt, rn, simm);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MacroAssemblerARMv7.cpp b/assembler/MacroAssemblerARMv7.cpp
new file mode 100644
index 0000000..7119697
--- /dev/null
+++ b/assembler/MacroAssemblerARMv7.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+
+#include <wtf/InlineASM.h>
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+#if COMPILER(GCC_OR_CLANG)
+
+// The following are offsets for MacroAssemblerARMv7::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#define PTR_SIZE 4
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE)
+
+#define GPREG_SIZE 4
+#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+
+#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+#define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+#define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE))
+#define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE))
+#define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE))
+#define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE))
+#define PROBE_CPU_D21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE))
+#define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE))
+#define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE))
+#define PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE))
+#define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE))
+#define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE))
+#define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE))
+#define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE))
+#define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE))
+#define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE))
+#define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE))
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE))
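+
+// For example, PROBE_CPU_R1_OFFSET = PROBE_FIRST_GPREG_OFFSET + 1 * GPREG_SIZE = 12 + 4 = 16;
+// the trampoline below relies on this when it does "add lr, sp, #PROBE_CPU_R1_OFFSET" and then
+// "stmia lr, { r1-r11 }" to spill r1..r11 into consecutive ProbeContext slots.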
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARMv7::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(sizeof(MacroAssemblerARMv7::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
+    
+asm (
+    ".text" "\n"
+    ".align 2" "\n"
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    ".thumb" "\n"
+    ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    // MacroAssemblerARMv7::probe() has already generated code to store some values.
+    // The top of stack now looks like this:
+    //     sp[0 * ptrSize]: probeFunction
+    //     sp[1 * ptrSize]: arg1
+    //     sp[2 * ptrSize]: arg2
+    //     sp[3 * ptrSize]: saved r0
+    //     sp[4 * ptrSize]: saved ip
+    //     sp[5 * ptrSize]: saved lr
+    //     sp[6 * ptrSize]: saved sp
+
+    "mov       ip, sp" "\n"
+    "mov       r0, sp" "\n"
+    "sub       r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
+
+    // The ARM EABI specifies that the stack needs to be 16 byte aligned.
+    "bic       r0, r0, #0xf" "\n"
+    "mov       sp, r0" "\n"
+
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "add       lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n"
+    "stmia     lr, { r1-r11 }" "\n"
+    "mrs       lr, APSR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "vmrs      lr, FPSCR" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+
+    "ldr       lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
+    "vstmia.64 ip!, { d0-d15 }" "\n"
+    "vstmia.64 ip!, { d16-d31 }" "\n"
+
+    "mov       fp, sp" "\n" // Save the ProbeContext*.
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+    "mov       r0, sp" "\n" // the ProbeContext* arg.
+    "blx       ip" "\n"
+
+    "mov       sp, fp" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n"
+    "vldmdb.64 ip!, { d16-d31 }" "\n"
+    "vldmdb.64 ip!, { d0-d15 }" "\n"
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
+    "ldmdb     ip, { r0-r11 }" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+    "vmsr      FPSCR, ip" "\n"
+
+    // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
+    // There are 2 issues that complicate the restoration of these last few
+    // registers:
+    //
+    // 1. Normal ARM calling convention relies on moving lr to pc to return to
+    //    the caller. In our case, the address to return to is specified by
+    //    ProbeContext.cpu.pc. And at that moment, we won't have any available
+    //    scratch registers to hold the return address (lr needs to hold
+    //    ProbeContext.cpu.lr, not the return address).
+    //
+    //    The solution is to store the return address on the stack and load the
+    //    pc from there.
+    //
+    // 2. Issue 1 means we will need to write to the stack location at
+    //    ProbeContext.cpu.sp - 4. But if the user probe function had modified
+    //    the value of ProbeContext.cpu.sp to point in the range between
+    //    &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
+    //    Issue 1 may trash the values to be restored before we can restore
+    //    them.
+    //
+    //    The solution is to check if ProbeContext.cpu.sp contains a value in
+    //    the undesirable range. If so, we copy the remaining ProbeContext
+    //    register data to a safe range (at memory lower than where
+    //    ProbeContext.cpu.sp points) first, and restore the remaining register
+    //    from this new range.
+
+    "add       ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "cmp       lr, ip" "\n"
+    "it        gt" "\n"
+    "bgt     " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // We get here because the new expected stack pointer location is lower
+    // than where it's supposed to be. This means the safe range of stack
+    // memory where we'll be copying the remaining register restore values to
+    // might be in a region of memory below the sp i.e. unallocated stack
+    // memory. This, in turn, makes it vulnerable to interrupts potentially
+    // trashing the copied values. To prevent that, we must first allocate the
+    // needed stack memory by adjusting the sp before the copying.
+
+    "sub       lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
+    " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
+
+    "mov       ip, sp" "\n"
+    "mov       sp, lr" "\n"
+    "mov       lr, ip" "\n"
+
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "str       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+
+    ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+    "ldr       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+    "sub       lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
+    "str       ip, [lr]" "\n"
+    "str       lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+    "msr       APSR, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+    "mov       lr, ip" "\n"
+    "ldr       ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+    "ldr       sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+    "pop       { pc }" "\n"
+);
+#endif // COMPILER(GCC_OR_CLANG)
+
+void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2)
+{
+    push(RegisterID::lr);
+    push(RegisterID::lr);
+    add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr);
+    store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4));
+    push(RegisterID::ip);
+    push(RegisterID::r0);
+    // The following pushes use RegisterID::ip (dataTempRegister), so they must come after we push ip above.
+    push(trustedImm32FromPtr(arg2));
+    push(trustedImm32FromPtr(arg1));
+    push(trustedImm32FromPtr(function));
+
+    move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip);
+    m_assembler.blx(RegisterID::ip);
+}
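+
+// Illustrative only (hypothetical usage, not part of this patch): a captureless
+// lambda converts to the ProbeFunction pointer type, so a caller with a
+// MacroAssemblerARMv7 instance 'jit' could dump register state roughly like this:
+//
+//     jit.probe([] (MacroAssemblerARMv7::ProbeContext* context) {
+//         dataLogF("r0 = 0x%x, sp = 0x%x\n", context->cpu.r0, context->cpu.sp);
+//     }, nullptr, nullptr);
+//
+// Because the trampoline above copies every register back out of the
+// ProbeContext before returning, values the callback writes into context->cpu
+// take effect in the probed code.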
+#endif // ENABLE(MASM_PROBE)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
diff --git a/assembler/MacroAssemblerARMv7.h b/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 0000000..3c95f28
--- /dev/null
+++ b/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,2139 @@
+/*
+ * Copyright (C) 2009-2010, 2014-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> {
+    static const RegisterID dataTempRegister = ARMRegisters::ip;
+    static const RegisterID addressTempRegister = ARMRegisters::r6;
+
+    static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
+    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
+
+public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+    
+    MacroAssemblerARMv7()
+        : m_makeJumpPatchable(false)
+    {
+    }
+
+    typedef ARMv7Assembler::LinkRecord LinkRecord;
+    typedef ARMv7Assembler::JumpType JumpType;
+    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+    typedef ARMv7Assembler::Condition Condition;
+
+    static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
+    static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
+
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value >= -255 && value <= 255;
+    }
+
+    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+    static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
+    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
+    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
+    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+    static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); }
+
+    struct ArmAddress {
+        enum AddressType {
+            HasOffset,
+            HasIndex,
+        } type;
+        RegisterID base;
+        union {
+            int32_t offset;
+            struct {
+                RegisterID index;
+                Scale scale;
+            };
+        } u;
+        
+        explicit ArmAddress(RegisterID base, int32_t offset = 0)
+            : type(HasOffset)
+            , base(base)
+        {
+            u.offset = offset;
+        }
+        
+        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+            : type(HasIndex)
+            , base(base)
+        {
+            u.index = index;
+            u.scale = scale;
+        }
+    };
+    
+public:
+    static const Scale ScalePtr = TimesFour;
+
+    enum RelationalCondition {
+        Equal = ARMv7Assembler::ConditionEQ,
+        NotEqual = ARMv7Assembler::ConditionNE,
+        Above = ARMv7Assembler::ConditionHI,
+        AboveOrEqual = ARMv7Assembler::ConditionHS,
+        Below = ARMv7Assembler::ConditionLO,
+        BelowOrEqual = ARMv7Assembler::ConditionLS,
+        GreaterThan = ARMv7Assembler::ConditionGT,
+        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+        LessThan = ARMv7Assembler::ConditionLT,
+        LessThanOrEqual = ARMv7Assembler::ConditionLE
+    };
+
+    enum ResultCondition {
+        Overflow = ARMv7Assembler::ConditionVS,
+        Signed = ARMv7Assembler::ConditionMI,
+        PositiveOrZero = ARMv7Assembler::ConditionPL,
+        Zero = ARMv7Assembler::ConditionEQ,
+        NonZero = ARMv7Assembler::ConditionNE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARMv7Assembler::ConditionEQ,
+        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+        DoubleLessThan = ARMv7Assembler::ConditionLO,
+        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
+    };
+
+    static const RegisterID stackPointerRegister = ARMRegisters::sp;
+    static const RegisterID framePointerRegister = ARMRegisters::fp;
+    static const RegisterID linkRegister = ARMRegisters::lr;
+
+    // Integer arithmetic operations:
+    //
+    // Operations are typically two operand - operation(source, srcDst).
+    // For many operations the source may be a TrustedImm32, and the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+    // object).
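+    //
+    // A minimal usage sketch (illustrative only; 'jit' names a hypothetical
+    // MacroAssemblerARMv7 instance and is not defined in this patch):
+    //
+    //     jit.add32(TrustedImm32(4), ARMRegisters::r0);      // r0 = r0 + 4
+    //     jit.sub32(ARMRegisters::r1, ARMRegisters::r0);     // r0 = r0 - r1
+    //     jit.and32(TrustedImm32(0xff), ARMRegisters::r0);   // r0 = r0 & 0xff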
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.add(dest, dest, src);
+    }
+
+    void add32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.add(dest, left, right);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        add32(imm, dest, dest);
+    }
+    
+    void add32(AbsoluteAddress src, RegisterID dest)
+    {
+        load32(src.m_ptr, dataTempRegister);
+        add32(dataTempRegister, dest);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+
+        // For adds with a stack pointer destination, moving the src to sp first is
+        // needed to avoid an unpredictable instruction.
+        if (dest == ARMRegisters::sp && src != dest) {
+            move(src, ARMRegisters::sp);
+            src = ARMRegisters::sp;
+        }
+
+        if (armImm.isValid())
+            m_assembler.add(dest, src, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.add(dest, src, dataTempRegister);
+        }
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        load32(address, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        add32(dataTempRegister, dest);
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+    {
+        add32(imm, srcDest);
+    }
+    
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+
+        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+        else {
+            move(imm, addressTempRegister);
+            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+        }
+        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+
+        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+        m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
+        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.ARM_and(dest, op1, op2);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.ARM_and(dest, src, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.ARM_and(dest, src, dataTempRegister);
+        }
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        and32(dest, src, dest);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        and32(imm, dest, dest);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        and32(dataTempRegister, dest);
+    }
+
+    void countLeadingZeros32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.clz(dest, src);
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+        m_assembler.lsl(dest, src, dataTempRegister);
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.lsl(dest, src, imm.m_value & 0x1f);
+    }
+
+    void lshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        lshift32(dest, shiftAmount, dest);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        lshift32(dest, imm, dest);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.smull(dest, dataTempRegister, dest, src);
+    }
+
+    void mul32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.smull(dest, dataTempRegister, left, right);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, dataTempRegister);
+        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+    }
+
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.neg(srcDest, srcDest);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orr(dest, dest, src);
+    }
+    
+    void or32(RegisterID src, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+        load32(addressTempRegister, dataTempRegister);
+        or32(src, dataTempRegister);
+        store32(dataTempRegister, addressTempRegister);
+    }
+
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid()) {
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+            load32(addressTempRegister, dataTempRegister);
+            m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
+            store32(dataTempRegister, addressTempRegister);
+        } else {
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+            load32(addressTempRegister, dataTempRegister);
+            move(imm, addressTempRegister);
+            m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
+            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+            store32(dataTempRegister, addressTempRegister);
+        }
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, dataTempRegister);
+        or32(imm, dataTempRegister, dataTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        or32(imm, dest, dest);
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orr(dest, op1, op2);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.orr(dest, src, armImm);
+        else {
+            ASSERT(src != dataTempRegister);
+            move(imm, dataTempRegister);
+            m_assembler.orr(dest, src, dataTempRegister);
+        }
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+        m_assembler.asr(dest, src, dataTempRegister);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.asr(dest, src, imm.m_value & 0x1f);
+    }
+
+    void rshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        rshift32(dest, shiftAmount, dest);
+    }
+    
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        rshift32(dest, imm, dest);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+        
+        m_assembler.lsr(dest, src, dataTempRegister);
+    }
+    
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.lsr(dest, src, imm.m_value & 0x1f);
+    }
+
+    void urshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift32(dest, shiftAmount, dest);
+    }
+    
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        urshift32(dest, imm, dest);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sub(dest, dest, src);
+    }
+
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.sub(dest, left, right);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.sub(dest, dest, dataTempRegister);
+        }
+    }
+
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        load32(address, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        sub32(dataTempRegister, dest);
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.eor(dest, op1, op2);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1) {
+            m_assembler.mvn(dest, src);
+            return;
+        }
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.eor(dest, src, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.eor(dest, src, dataTempRegister);
+        }
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        xor32(dest, src, dest);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.mvn(dest, dest);
+        else
+            xor32(imm, dest, dest);
+    }
+    
+
+    // Memory access operations:
+    //
+    // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address).  The source for a store may be a TrustedImm32.  Address
+    // operand objects to loads and stores will be implicitly constructed if a
+    // register is passed.
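+    //
+    // A minimal usage sketch (illustrative only; 'jit' names a hypothetical
+    // MacroAssemblerARMv7 instance):
+    //
+    //     jit.load32(Address(ARMRegisters::r1, 4), ARMRegisters::r0);    // r0 = *(r1 + 4)
+    //     jit.store32(TrustedImm32(0), Address(ARMRegisters::r1, 4));    // *(r1 + 4) = 0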
+
+private:
+    void load32(ArmAddress address, RegisterID dest)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.ldr(dest, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+        }
+    }
+
+    void load16(ArmAddress address, RegisterID dest)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.ldrh(dest, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+        }
+    }
+    
+    void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
+    {
+        ASSERT(address.type == ArmAddress::HasIndex);
+        m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
+    }
+
+    void load8(ArmAddress address, RegisterID dest)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.ldrb(dest, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
+        }
+    }
+    
+    void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
+    {
+        ASSERT(address.type == ArmAddress::HasIndex);
+        m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
+    }
+
+protected:
+    void store32(RegisterID src, ArmAddress address)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.str(src, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.str(src, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.str(src, address.base, address.u.offset, true, false);
+        }
+    }
+
+private:
+    void store8(RegisterID src, ArmAddress address)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.strb(src, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.strb(src, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.strb(src, address.base, address.u.offset, true, false);
+        }
+    }
+    
+    void store16(RegisterID src, ArmAddress address)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.strh(src, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.strh(src, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.strh(src, address.base, address.u.offset, true, false);
+        }
+    }
+
+public:
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        load32(setupArmAddress(address), dest);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        load32(setupArmAddress(address), dest);
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(setupArmAddress(address), dest);
+    }
+
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        load16(setupArmAddress(address), dest);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address), addressTempRegister);
+        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+    }
+    
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), dataTempRegister);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), addressTempRegister);
+        abortWithReason(reason);
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result(this);
+        ASSERT(address.offset >= 0 && address.offset <= 255);
+        m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
+        return result;
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        load8(setupArmAddress(address), dest);
+    }
+
+    void load8SignedExtendTo32(ImplicitAddress, RegisterID)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        load8(setupArmAddress(address), dest);
+    }
+    
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        load8SignedExtendTo32(setupArmAddress(address), dest);
+    }
+
+    void load8(const void* address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address), dest);
+        load8(dest, dest);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+        load32(ArmAddress(address.base, dataTempRegister), dest);
+        return label;
+    }
+    
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+
+        RegisterID base = address.base;
+        
+        DataLabelCompact label(this);
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+
+        m_assembler.ldr(dest, base, address.offset, true, false);
+        return label;
+    }
+
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+    }
+    
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        load16SignedExtendTo32(setupArmAddress(address), dest);
+    }
+    
+    void load16(ImplicitAddress address, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
+        if (armImm.isValid())
+            m_assembler.ldrh(dest, address.base, armImm);
+        else {
+            move(TrustedImm32(address.offset), dataTempRegister);
+            m_assembler.ldrh(dest, address.base, dataTempRegister);
+        }
+    }
+    
+    void load16SignedExtendTo32(ImplicitAddress, RegisterID)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+        store32(src, ArmAddress(address.base, dataTempRegister));
+        return label;
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        store32(src, setupArmAddress(address));
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        store32(src, setupArmAddress(address));
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        move(imm, dataTempRegister);
+        store32(dataTempRegister, setupArmAddress(address));
+    }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        move(imm, dataTempRegister);
+        store32(dataTempRegister, setupArmAddress(address));
+    }
+
+    void store32(RegisterID src, const void* address)
+    {
+        move(TrustedImmPtr(address), addressTempRegister);
+        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    void store32(TrustedImm32 imm, const void* address)
+    {
+        move(imm, dataTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+    void store8(RegisterID src, Address address)
+    {
+        store8(src, setupArmAddress(address));
+    }
+    
+    void store8(RegisterID src, BaseIndex address)
+    {
+        store8(src, setupArmAddress(address));
+    }
+    
+    void store8(RegisterID src, void* address)
+    {
+        move(TrustedImmPtr(address), addressTempRegister);
+        store8(src, ArmAddress(addressTempRegister, 0));
+    }
+    
+    void store8(TrustedImm32 imm, void* address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(imm8, dataTempRegister);
+        store8(dataTempRegister, address);
+    }
+    
+    void store8(TrustedImm32 imm, Address address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(imm8, dataTempRegister);
+        store8(dataTempRegister, address);
+    }
+    
+    void store16(RegisterID src, BaseIndex address)
+    {
+        store16(src, setupArmAddress(address));
+    }
+
+    // Possibly clobbers src, but not on this architecture.
+    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.vmov(dest1, dest2, src);
+    }
+    
+    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+    {
+        UNUSED_PARAM(scratch);
+        m_assembler.vmov(dest, src1, src2);
+    }
+
+    static bool shouldBlindForSpecificArch(uint32_t value)
+    {
+        ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
+
+        // Couldn't be encoded as an immediate, so assume it's untrusted.
+        if (!immediate.isValid())
+            return true;
+        
+        // If we can encode the immediate, we have less than 16 attacker
+        // controlled bits.
+        if (immediate.isEncodedImm())
+            return false;
+
+        // Don't let any more than 12 bits of an instruction word
+        // be controlled by an attacker.
+        return !immediate.isUInt12();
+    }
+
+    // Floating-point operations:
+
+    static bool supportsFloatingPoint() { return true; }
+    static bool supportsFloatingPointTruncate() { return true; }
+    static bool supportsFloatingPointSqrt() { return true; }
+    static bool supportsFloatingPointAbs() { return true; }
+    static bool supportsFloatingPointRounding() { return false; }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        RegisterID base = address.base;
+        int32_t offset = address.offset;
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+            add32(TrustedImm32(offset), base, addressTempRegister);
+            base = addressTempRegister;
+            offset = 0;
+        }
+        
+        m_assembler.vldr(dest, base, offset);
+    }
+
+    void loadFloat(ImplicitAddress address, FPRegisterID dest)
+    {
+        RegisterID base = address.base;
+        int32_t offset = address.offset;
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+            add32(TrustedImm32(offset), base, addressTempRegister);
+            base = addressTempRegister;
+            offset = 0;
+        }
+        
+        m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        move(address.index, addressTempRegister);
+        lshift32(TrustedImm32(address.scale), addressTempRegister);
+        add32(address.base, addressTempRegister);
+        loadDouble(Address(addressTempRegister, address.offset), dest);
+    }
+    
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        move(address.index, addressTempRegister);
+        lshift32(TrustedImm32(address.scale), addressTempRegister);
+        add32(address.base, addressTempRegister);
+        loadFloat(Address(addressTempRegister, address.offset), dest);
+    }
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.vmov(dest, src);
+    }
+
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        static double zeroConstant = 0.;
+        loadDouble(TrustedImmPtr(&zeroConstant), reg);
+    }
+
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+        move(address, addressTempRegister);
+        m_assembler.vldr(dest, addressTempRegister, 0);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        RegisterID base = address.base;
+        int32_t offset = address.offset;
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+            add32(TrustedImm32(offset), base, addressTempRegister);
+            base = addressTempRegister;
+            offset = 0;
+        }
+        
+        m_assembler.vstr(src, base, offset);
+    }
+
+    void storeFloat(FPRegisterID src, ImplicitAddress address)
+    {
+        RegisterID base = address.base;
+        int32_t offset = address.offset;
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+            add32(TrustedImm32(offset), base, addressTempRegister);
+            base = addressTempRegister;
+            offset = 0;
+        }
+        
+        m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
+    }
+
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
+    {
+        move(address, addressTempRegister);
+        storeDouble(src, addressTempRegister);
+    }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        move(address.index, addressTempRegister);
+        lshift32(TrustedImm32(address.scale), addressTempRegister);
+        add32(address.base, addressTempRegister);
+        storeDouble(src, Address(addressTempRegister, address.offset));
+    }
+    
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        move(address.index, addressTempRegister);
+        lshift32(TrustedImm32(address.scale), addressTempRegister);
+        add32(address.base, addressTempRegister);
+        storeFloat(src, Address(addressTempRegister, address.offset));
+    }
+    
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vadd(dest, dest, src);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        addDouble(fpTempRegister, dest);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vadd(dest, op1, op2);
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+        m_assembler.vadd(dest, dest, fpTempRegister);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vdiv(dest, dest, src);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vdiv(dest, op1, op2);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vsub(dest, dest, src);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        subDouble(fpTempRegister, dest);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vsub(dest, op1, op2);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vmul(dest, dest, src);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        mulDouble(fpTempRegister, dest);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.vmul(dest, op1, op2);
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vsqrt(dest, src);
+    }
+    
+    void absDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vabs(dest, src);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vneg(dest, src);
+    }
+
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vmov(fpTempRegister, src, src);
+        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+    }
+
+    void convertInt32ToDouble(Address address, FPRegisterID dest)
+    {
+        // Fixme: load directly into the fpr!
+        load32(address, dataTempRegister);
+        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        // Fixme: load directly into the fpr!
+        load32(address.m_ptr, dataTempRegister);
+        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+    }
+    
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
+    }
+    
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.vcmp(left, right);
+        m_assembler.vmrs();
+
+        if (cond == DoubleNotEqual) {
+            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+            unordered.link(this);
+            return result;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+            unordered.link(this);
+            // We get here if either unordered or equal.
+            Jump result = jump();
+            notEqual.link(this);
+            return result;
+        }
+        return makeBranch(cond);
+    }
+
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        // Convert into dest.
+        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+        // Calculate 2x dest.  If the value potentially underflowed, it will have
+        // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
+        // overflow the result will be equal to -2.
+        Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
+        Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
+
+        // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
+        underflow.link(this);
+        if (branchType == BranchIfTruncateSuccessful)
+            return noOverflow;
+
+        // We'll reach the current point in the code on failure, so plant a
+        // jump here & link the success case.
+        Jump failure = jump();
+        noOverflow.link(this);
+        return failure;
+    }
+
+    // Result is undefined if the value is outside of the integer range.
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+    }
+    
+    // Convert 'src' to an integer, and place the result in 'dest'.
+    // If the result is not representable as a 32-bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+    {
+        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+        if (negZeroCheck)
+            failureCases.append(branchTest32(Zero, dest));
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.vcmpz(reg);
+        m_assembler.vmrs();
+        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+        unordered.link(this);
+        return result;
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.vcmpz(reg);
+        m_assembler.vmrs();
+        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+        unordered.link(this);
+        // We get here if either unordered or equal.
+        Jump result = jump();
+        notEqual.link(this);
+        return result;
+    }
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data.  Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack.  Peek and poke operations read or write
+    // values on the stack, without moving the current stack position.
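+    //
+    // A minimal usage sketch (illustrative only; 'jit' names a hypothetical
+    // MacroAssemblerARMv7 instance):
+    //
+    //     jit.pushPair(ARMRegisters::fp, ARMRegisters::lr);  // one store-multiple of both registers
+    //     jit.push(TrustedImm32(42));                        // sp -= 4; *sp = 42
+    //     jit.pop(ARMRegisters::r0);                         // r0 = *sp; sp += 4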
+    
+    void pop(RegisterID dest)
+    {
+        m_assembler.pop(dest);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.push(src);
+    }
+
+    void push(Address address)
+    {
+        load32(address, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        move(imm, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.pop(1 << dest1 | 1 << dest2);
+    }
+    
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.push(1 << src1 | 1 << src2);
+    }
+    
+    // Register move operations:
+    //
+    // Move values in registers.
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        uint32_t value = imm.m_value;
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+        if (armImm.isValid())
+            m_assembler.mov(dest, armImm);
+        else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+            m_assembler.mvn(dest, armImm);
+        else {
+            m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+            if (value & 0xffff0000)
+                m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+        }
+    }
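+    // Worked examples for the constant selection above (illustrative values only,
+    // not taken from this patch): 0x000000ab encodes directly as a Thumb-2 modified
+    // immediate and becomes a single MOV; 0xffffff00 is materialized as MVN of 0xff;
+    // a value such as 0x12345678 fits neither form and falls back to the MOVW/MOVT pair.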
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.mov(dest, src);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        move(TrustedImm32(imm), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        move(reg1, dataTempRegister);
+        move(reg2, reg1);
+        move(dataTempRegister, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+    // This works because the ARM condition codes are laid out in complementary
+    // pairs that differ only in their least significant bit (EQ/NE, HS/LO, GE/LT, ...).
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        return static_cast<RelationalCondition>(cond ^ 1);
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+    
+    void memoryFence()
+    {
+        m_assembler.dmbSY();
+    }
+    
+    void storeFence()
+    {
+        m_assembler.dmbISHST();
+    }
+    
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return ARMv7Assembler::maxJumpReplacementSize();
+    }
+
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARMv7Assembler::patchableJumpSize();
+    }
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
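+    //
+    // A short usage sketch (the register and label choices here are assumptions):
+    //     Jump isNegative = branch32(LessThan, ARMRegisters::r0, TrustedImm32(0));
+    //     // ... fall-through path for non-negative values ...
+    //     isNegative.link(this);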
+private:
+
+    // Should we be using TEQ for equal/not-equal?
+    void compare32AndSetFlags(RegisterID left, TrustedImm32 right)
+    {
+        int32_t imm = right.m_value;
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+        if (armImm.isValid())
+            m_assembler.cmp(left, armImm);
+        else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+            m_assembler.cmn(left, armImm);
+        else {
+            move(TrustedImm32(imm), dataTempRegister);
+            m_assembler.cmp(left, dataTempRegister);
+        }
+    }
+
+public:
+    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        int32_t imm = mask.m_value;
+
+        if (imm == -1)
+            m_assembler.tst(reg, reg);
+        else {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+            if (armImm.isValid()) {
+                if (reg == ARMRegisters::sp) {
+                    move(reg, addressTempRegister);
+                    m_assembler.tst(addressTempRegister, armImm);
+                } else
+                    m_assembler.tst(reg, armImm);
+            } else {
+                move(mask, dataTempRegister);
+                if (reg == ARMRegisters::sp) {
+                    move(reg, addressTempRegister);
+                    m_assembler.tst(addressTempRegister, dataTempRegister);
+                } else
+                    m_assembler.tst(reg, dataTempRegister);
+            }
+        }
+    }
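+    // Note: SP is not generally a valid operand for Thumb-2 data-processing instructions such
+    // as TST, which is why the code above first copies it into addressTempRegister.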
+    
+    Jump branch(ResultCondition cond)
+    {
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmp(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        compare32AndSetFlags(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load32(right, dataTempRegister);
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load32(left, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32WithUnalignedHalfWords(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left.m_ptr, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load32(left, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        compare32AndSetFlags(left, right8);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        return branch8(cond, addressTempRegister, right8);
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right8);
+    }
+    
+    Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
+    {
+        // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
+        return branch32(cond, addressTempRegister, right8);
+    }
+    
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
+        m_assembler.tst(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
+        test32(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+        load32(address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask);
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+        load32(address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask8);
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.bx(target);
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(Address address)
+    {
+        load32(address, dataTempRegister);
+        m_assembler.bx(dataTempRegister);
+    }
+    
+    void jump(AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+        load32(Address(dataTempRegister), dataTempRegister);
+        m_assembler.bx(dataTempRegister);
+    }
+
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation.  The operation
+    // is performed as normal, storing the result.
+    //
+    // * jz operations branch if the result is zero.
+    // * jo operations branch if the (signed) arithmetic
+    //   operation caused an overflow to occur.
+    
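+    // Sketch of a typical overflow check (the register choices are assumptions):
+    //     Jump didOverflow = branchAdd32(Overflow, ARMRegisters::r1, ARMRegisters::r0);
+    //     // r0 now holds r0 + r1; didOverflow is taken if the signed addition wrapped.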
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.add_S(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add_S(dest, op1, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.add_S(dest, op1, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd32(cond, dest, src, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        return branchAdd32(cond, dest, dataTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd32(cond, dest, imm, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        // Move the high bits of the address into addressTempRegister,
+        // and load the value into dataTempRegister.
+        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+        // Do the add.
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // If the operand does not fit into an immediate then load it temporarily
+            // into addressTempRegister; since we're overwriting addressTempRegister
+            // we'll need to reload it with the high bits of the address afterwards.
+            move(imm, addressTempRegister);
+            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+        }
+
+        // Store the result.
+        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        m_assembler.smull(dest, dataTempRegister, src1, src2);
+
+        if (cond == Overflow) {
+            m_assembler.asr(addressTempRegister, dest, 31);
+            return branch32(NotEqual, addressTempRegister, dataTempRegister);
+        }
+
+        return branchTest32(cond, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchMul32(cond, src, dest, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, dataTempRegister);
+        return branchMul32(cond, dataTempRegister, src, dest);
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+    {
+        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+        m_assembler.sub_S(srcDest, zero, srcDest);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        m_assembler.orr_S(dest, dest, src);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.sub_S(dest, op1, op2);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub_S(dest, op1, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.sub_S(dest, op1, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+    
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub32(cond, dest, src, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub32(cond, dest, imm, dest);
+    }
+    
+    void relativeTableJump(RegisterID index, int scale)
+    {
+        ASSERT(scale >= 0 && scale <= 31);
+
+        // dataTempRegister will point after the jump if index register contains zero
+        move(ARMRegisters::pc, dataTempRegister);
+        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
+
+        ShiftTypeAndAmount shift(SRType_LSL, scale);
+        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
+        jump(dataTempRegister);
+    }
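+    // For example (an assumption about the caller): with scale == 2, incrementing the index by
+    // one moves the jump target another 4 bytes past the end of this sequence.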
+
+    // Miscellaneous operations:
+
+    void breakpoint(uint8_t imm = 0)
+    {
+        m_assembler.bkpt(imm);
+    }
+
+    ALWAYS_INLINE Call nearCall()
+    {
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+    }
+
+    ALWAYS_INLINE Call nearTailCall()
+    {
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail);
+    }
+
+    ALWAYS_INLINE Call call()
+    {
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+    }
+
+    ALWAYS_INLINE Call call(RegisterID target)
+    {
+        return Call(m_assembler.blx(target), Call::None);
+    }
+
+    ALWAYS_INLINE Call call(Address address)
+    {
+        load32(address, dataTempRegister);
+        return Call(m_assembler.blx(dataTempRegister), Call::None);
+    }
+
+    ALWAYS_INLINE void ret()
+    {
+        m_assembler.bx(linkRegister);
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp(left, right);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
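+    // The IT block above makes the first mov conditional on cond and the second on its inverse,
+    // so dest ends up as 1 exactly when the comparison holds and 0 otherwise.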
+
+    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        load32(left, dataTempRegister);
+        compare32(cond, dataTempRegister, right, dest);
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        compare32(cond, addressTempRegister, right8, dest);
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        compare32AndSetFlags(left, right);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+    // dest-src, operations always have a dest? ... possibly not true, considering
+    // asm ops like test, or pseudo ops like pop().
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load32(address, dataTempRegister);
+        test32(dataTempRegister, mask);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        test32(dataTempRegister, mask8);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
+    {
+        padBeforePatch();
+        moveFixedWidthEncoding(imm, dst);
+        return DataLabel32(this);
+    }
+
+    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
+    {
+        padBeforePatch();
+        moveFixedWidthEncoding(TrustedImm32(imm), dst);
+        return DataLabelPtr(this);
+    }
+
+    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        load32(left, addressTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        return branch32(cond, addressTempRegister, dataTempRegister);
+    }
+    
+    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        load32(left, addressTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        return branch32(cond, addressTempRegister, dataTempRegister);
+    }
+    
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, left, TrustedImm32(right));
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+    
+    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branchTest32(cond, reg, mask);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, reg, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32(cond, left, imm);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        m_makeJumpPatchable = true;
+        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    PatchableJump patchableJump()
+    {
+        padBeforePatch();
+        m_makeJumpPatchable = true;
+        Jump result = jump();
+        m_makeJumpPatchable = false;
+        return PatchableJump(result);
+    }
+
+    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+        store32(dataTempRegister, address);
+        return label;
+    }
+    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+
+    ALWAYS_INLINE Call tailRecursiveCall()
+    {
+        // Like a normal call, but don't link.
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+    }
+
+    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        return tailRecursiveCall();
+    }
+
+    
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
+    }
+    
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+    
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        const unsigned twoWordOpSize = 4;
+        return label.labelAtOffset(-twoWordOpSize * 2);
+    }
+    
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+    {
+#if OS(LINUX)
+        ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
+#else
+        UNUSED_PARAM(rd);
+        ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
+#endif
+    }
+    
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+    
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+    ALWAYS_INLINE Jump jump()
+    {
+        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
+    }
+
+    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
+    {
+        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+        m_assembler.it(cond, true, true);
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
+    }
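+    // Note: the IT block conditionalizes the two-instruction fixed-width move and the bx that
+    // follow it, and the fixed-width encoding leaves room for the real jump target to be written
+    // in when the Jump is linked.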
+    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+    ArmAddress setupArmAddress(BaseIndex address)
+    {
+        if (address.offset) {
+            ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+            if (imm.isValid())
+                m_assembler.add(addressTempRegister, address.base, imm);
+            else {
+                move(TrustedImm32(address.offset), addressTempRegister);
+                m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+            }
+
+            return ArmAddress(addressTempRegister, address.index, address.scale);
+        } else
+            return ArmAddress(address.base, address.index, address.scale);
+    }
+
+    ArmAddress setupArmAddress(Address address)
+    {
+        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+            return ArmAddress(address.base, address.offset);
+
+        move(TrustedImm32(address.offset), addressTempRegister);
+        return ArmAddress(address.base, addressTempRegister);
+    }
+
+    ArmAddress setupArmAddress(ImplicitAddress address)
+    {
+        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+            return ArmAddress(address.base, address.offset);
+
+        move(TrustedImm32(address.offset), addressTempRegister);
+        return ArmAddress(address.base, addressTempRegister);
+    }
+
+    RegisterID makeBaseIndexBase(BaseIndex address)
+    {
+        if (!address.offset)
+            return address.base;
+
+        ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+        if (imm.isValid())
+            m_assembler.add(addressTempRegister, address.base, imm);
+        else {
+            move(TrustedImm32(address.offset), addressTempRegister);
+            m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+        }
+
+        return addressTempRegister;
+    }
+
+    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
+    {
+        uint32_t value = imm.m_value;
+        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+    }
+
+    ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
+    {
+        return static_cast<ARMv7Assembler::Condition>(cond);
+    }
+
+    ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
+    {
+        return static_cast<ARMv7Assembler::Condition>(cond);
+    }
+
+    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+    {
+        return static_cast<ARMv7Assembler::Condition>(cond);
+    }
+    
+private:
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Tail))
+            ARMv7Assembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMv7Assembler::linkCall(code, call.m_label, function.value());
+    }
+
+#if ENABLE(MASM_PROBE)
+    inline TrustedImm32 trustedImm32FromPtr(void* ptr)
+    {
+        return TrustedImm32(TrustedImmPtr(ptr));
+    }
+
+    inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
+    {
+        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+    }
+
+    inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+    {
+        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+    }
+#endif
+
+    bool m_makeJumpPatchable;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MacroAssemblerCodeRef.cpp b/assembler/MacroAssemblerCodeRef.cpp
new file mode 100644
index 0000000..06460c9
--- /dev/null
+++ b/assembler/MacroAssemblerCodeRef.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "MacroAssemblerCodeRef.h"
+
+#include "JSCInlines.h"
+#include "LLIntData.h"
+
+namespace JSC {
+
+MacroAssemblerCodePtr MacroAssemblerCodePtr::createLLIntCodePtr(OpcodeID codeId)
+{
+    return createFromExecutableAddress(LLInt::getCodePtr(codeId));
+}
+
+void MacroAssemblerCodePtr::dumpWithName(const char* name, PrintStream& out) const
+{
+    if (!m_value) {
+        out.print(name, "(null)");
+        return;
+    }
+    if (executableAddress() == dataLocation()) {
+        out.print(name, "(", RawPointer(executableAddress()), ")");
+        return;
+    }
+    out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")");
+}
+
+void MacroAssemblerCodePtr::dump(PrintStream& out) const
+{
+    dumpWithName("CodePtr", out);
+}
+
+MacroAssemblerCodeRef MacroAssemblerCodeRef::createLLIntCodeRef(OpcodeID codeId)
+{
+    return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
+}
+
+bool MacroAssemblerCodeRef::tryToDisassemble(PrintStream& out, const char* prefix) const
+{
+    return JSC::tryToDisassemble(m_codePtr, size(), prefix, out);
+}
+
+bool MacroAssemblerCodeRef::tryToDisassemble(const char* prefix) const
+{
+    return tryToDisassemble(WTF::dataFile(), prefix);
+}
+
+CString MacroAssemblerCodeRef::disassembly() const
+{
+    StringPrintStream out;
+    if (!tryToDisassemble(out, ""))
+        return CString();
+    return out.toCString();
+}
+
+void MacroAssemblerCodeRef::dump(PrintStream& out) const
+{
+    m_codePtr.dumpWithName("CodeRef", out);
+}
+
+} // namespace JSC
+
diff --git a/assembler/MacroAssemblerCodeRef.h b/assembler/MacroAssemblerCodeRef.h
new file mode 100644
index 0000000..a71a486
--- /dev/null
+++ b/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2009, 2012, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ExecutableAllocator.h"
+#include <wtf/DataLog.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/PrintStream.h>
+#include <wtf/RefPtr.h>
+#include <wtf/text/CString.h>
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, checking any alignment requirements).
+#if CPU(ARM_THUMB2) && ENABLE(JIT)
+// ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded into
+// the processor are decorated with the bottom bit set, while traditional ARM has
+// the lower bit clear. Since we don't know what kind of pointer this is, we check for both
+// decorated and undecorated null.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+    ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+    ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+    ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
+
+namespace JSC {
+
+enum OpcodeID : unsigned;
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
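+//
+// A typical (assumed) use is wrapping a host function so that JIT code can call it, e.g.:
+//     extern "C" EncodedJSValue hostOperation(ExecState*); // hypothetical host function
+//     FunctionPtr target(hostOperation);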
+class FunctionPtr {
+public:
+    FunctionPtr()
+        : m_value(0)
+    {
+    }
+
+    template<typename returnType>
+    FunctionPtr(returnType(*value)())
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1>
+    FunctionPtr(returnType(*value)(argType1))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2>
+    FunctionPtr(returnType(*value)(argType1, argType2))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
+    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+// MSVC doesn't seem to treat functions with different calling conventions as
+// different types; these methods are already defined for fastcall, below.
+#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
+
+    template<typename returnType>
+    FunctionPtr(returnType (CDECL *value)())
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1>
+    FunctionPtr(returnType (CDECL *value)(argType1))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2>
+    FunctionPtr(returnType (CDECL *value)(argType1, argType2))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3>
+    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+#endif
+
+#if COMPILER_SUPPORTS(FASTCALL_CALLING_CONVENTION)
+
+    template<typename returnType>
+    FunctionPtr(returnType (FASTCALL *value)())
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1>
+    FunctionPtr(returnType (FASTCALL *value)(argType1))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2>
+    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3>
+    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4))
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+#endif
+
+    template<typename FunctionType>
+    explicit FunctionPtr(FunctionType* value)
+        // Using a C-style cast here to avoid compiler error on RVTC:
+        // Error:  #694: reinterpret_cast cannot cast away const or other type qualifiers
+        // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
+        : m_value((void*)value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    void* value() const { return m_value; }
+    void* executableAddress() const { return m_value; }
+
+
+private:
+    void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+// 'call' instructions executed in JIT code.  We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+    ReturnAddressPtr()
+        : m_value(0)
+    {
+    }
+
+    explicit ReturnAddressPtr(void* value)
+        : m_value(value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    explicit ReturnAddressPtr(FunctionPtr function)
+        : m_value(function.value())
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    void* value() const { return m_value; }
+    
+    void dump(PrintStream& out) const
+    {
+        out.print(RawPointer(m_value));
+    }
+
+private:
+    void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+    MacroAssemblerCodePtr()
+        : m_value(0)
+    {
+    }
+
+    explicit MacroAssemblerCodePtr(void* value)
+#if CPU(ARM_THUMB2)
+        // Decorate the pointer as a thumb code pointer.
+        : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+        : m_value(value)
+#endif
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+    
+    static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
+    {
+        ASSERT_VALID_CODE_POINTER(value);
+        MacroAssemblerCodePtr result;
+        result.m_value = value;
+        return result;
+    }
+
+    static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId);
+
+    explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+        : m_value(ra.value())
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    void* executableAddress() const { return m_value; }
+#if CPU(ARM_THUMB2)
+    // To use this pointer as a data address, remove the decoration.
+    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+    explicit operator bool() const { return m_value; }
+    
+    bool operator==(const MacroAssemblerCodePtr& other) const
+    {
+        return m_value == other.m_value;
+    }
+
+    void dumpWithName(const char* name, PrintStream& out) const;
+    
+    void dump(PrintStream& out) const;
+    
+    enum EmptyValueTag { EmptyValue };
+    enum DeletedValueTag { DeletedValue };
+    
+    MacroAssemblerCodePtr(EmptyValueTag)
+        : m_value(emptyValue())
+    {
+    }
+    
+    MacroAssemblerCodePtr(DeletedValueTag)
+        : m_value(deletedValue())
+    {
+    }
+    
+    bool isEmptyValue() const { return m_value == emptyValue(); }
+    bool isDeletedValue() const { return m_value == deletedValue(); }
+    
+    unsigned hash() const { return PtrHash<void*>::hash(m_value); }
+
+private:
+    static void* emptyValue() { return bitwise_cast<void*>(static_cast<intptr_t>(1)); }
+    static void* deletedValue() { return bitwise_cast<void*>(static_cast<intptr_t>(2)); }
+    
+    void* m_value;
+};
+
+struct MacroAssemblerCodePtrHash {
+    static unsigned hash(const MacroAssemblerCodePtr& ptr) { return ptr.hash(); }
+    static bool equal(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+    {
+        return a == b;
+    }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code.  A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+private:
+    // This is private because it's dangerous enough that we want uses of it
+    // to be easy to find - hence the static create method below.
+    explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
+        : m_codePtr(codePtr)
+    {
+        ASSERT(m_codePtr);
+    }
+
+public:
+    MacroAssemblerCodeRef()
+    {
+    }
+
+    MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+        : m_codePtr(executableMemory->start())
+        , m_executableMemory(executableMemory)
+    {
+        ASSERT(m_executableMemory->isManaged());
+        ASSERT(m_executableMemory->start());
+        ASSERT(m_codePtr);
+    }
+    
+    // Use this only when you know that the codePtr refers to code that is
+    // already being kept alive through some other means. Typically this means
+    // that codePtr is immortal.
+    static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
+    {
+        return MacroAssemblerCodeRef(codePtr);
+    }
+    
+    // Helper for creating self-managed code refs from LLInt.
+    static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId);
+
+    ExecutableMemoryHandle* executableMemory() const
+    {
+        return m_executableMemory.get();
+    }
+    
+    MacroAssemblerCodePtr code() const
+    {
+        return m_codePtr;
+    }
+    
+    size_t size() const
+    {
+        if (!m_executableMemory)
+            return 0;
+        return m_executableMemory->sizeInBytes();
+    }
+
+    bool tryToDisassemble(PrintStream& out, const char* prefix = "") const;
+    
+    bool tryToDisassemble(const char* prefix = "") const;
+    
+    JS_EXPORT_PRIVATE CString disassembly() const;
+    
+    explicit operator bool() const { return !!m_codePtr; }
+    
+    void dump(PrintStream& out) const;
+
+private:
+    MacroAssemblerCodePtr m_codePtr;
+    RefPtr<ExecutableMemoryHandle> m_executableMemory;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::MacroAssemblerCodePtr> {
+    typedef JSC::MacroAssemblerCodePtrHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::MacroAssemblerCodePtr> : public CustomHashTraits<JSC::MacroAssemblerCodePtr> { };
+
+} // namespace WTF
diff --git a/assembler/MacroAssemblerHelpers.h b/assembler/MacroAssemblerHelpers.h
new file mode 100644
index 0000000..047e94c
--- /dev/null
+++ b/assembler/MacroAssemblerHelpers.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+namespace MacroAssemblerHelpers {
+
+// True if this:
+//     branch8(cond, value, value)
+// Is the same as this:
+//     branch32(cond, signExt8(value), signExt8(value))
+template<typename MacroAssemblerType>
+inline bool isSigned(typename MacroAssemblerType::RelationalCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Equal:
+    case MacroAssemblerType::NotEqual:
+    case MacroAssemblerType::GreaterThan:
+    case MacroAssemblerType::GreaterThanOrEqual:
+    case MacroAssemblerType::LessThan:
+    case MacroAssemblerType::LessThanOrEqual:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// True if this:
+//     branch8(cond, value, value)
+// Is the same as this:
+//     branch32(cond, zeroExt8(value), zeroExt8(value))
+template<typename MacroAssemblerType>
+inline bool isUnsigned(typename MacroAssemblerType::RelationalCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Equal:
+    case MacroAssemblerType::NotEqual:
+    case MacroAssemblerType::Above:
+    case MacroAssemblerType::AboveOrEqual:
+    case MacroAssemblerType::Below:
+    case MacroAssemblerType::BelowOrEqual:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// True if this:
+//     test8(cond, value, value)
+// Is the same as this:
+//     test32(cond, signExt8(value), signExt8(value))
+template<typename MacroAssemblerType>
+inline bool isSigned(typename MacroAssemblerType::ResultCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Signed:
+    case MacroAssemblerType::PositiveOrZero:
+    case MacroAssemblerType::Zero:
+    case MacroAssemblerType::NonZero:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// True if this:
+//     test8(cond, value, value)
+// Is the same as this:
+//     test32(cond, zeroExt8(value), zeroExt8(value))
+template<typename MacroAssemblerType>
+inline bool isUnsigned(typename MacroAssemblerType::ResultCondition cond)
+{
+    switch (cond) {
+    case MacroAssemblerType::Zero:
+    case MacroAssemblerType::NonZero:
+        return true;
+    default:
+        return false;
+    }
+}
+
+template<typename MacroAssemblerType>
+inline typename MacroAssemblerType::TrustedImm32 mask8OnCondition(MacroAssemblerType&, typename MacroAssemblerType::RelationalCondition cond, typename MacroAssemblerType::TrustedImm32 value)
+{
+    if (isUnsigned<MacroAssemblerType>(cond))
+        return typename MacroAssemblerType::TrustedImm32(static_cast<uint8_t>(value.m_value));
+    return typename MacroAssemblerType::TrustedImm32(static_cast<int8_t>(value.m_value));
+}
+
+template<typename MacroAssemblerType>
+inline typename MacroAssemblerType::TrustedImm32 mask8OnCondition(MacroAssemblerType&, typename MacroAssemblerType::ResultCondition cond, typename MacroAssemblerType::TrustedImm32 value)
+{
+    if (isUnsigned<MacroAssemblerType>(cond))
+        return typename MacroAssemblerType::TrustedImm32(static_cast<uint8_t>(value.m_value));
+    ASSERT_WITH_MESSAGE(cond != MacroAssemblerType::Overflow, "Overflow is not used for 8bit test operations.");
+    ASSERT(isSigned<MacroAssemblerType>(cond));
+    return typename MacroAssemblerType::TrustedImm32(static_cast<int8_t>(value.m_value));
+}
+
+template<typename MacroAssemblerType, typename Condition, typename... Args>
+void load8OnCondition(MacroAssemblerType& jit, Condition cond, Args... args)
+{
+    if (isUnsigned<MacroAssemblerType>(cond))
+        return jit.load8(std::forward<Args>(args)...);
+    return jit.load8SignedExtendTo32(std::forward<Args>(args)...);
+}
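+
+// Taken together (a summary of the helpers above, not new behavior): unsigned conditions
+// zero-extend the 8-bit immediate and pair with load8(), while signed conditions sign-extend
+// it and pair with load8SignedExtendTo32(), so both sides of the eventual 32-bit comparison
+// use the same extension.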
+
+} } // namespace JSC
diff --git a/assembler/MacroAssemblerMIPS.h b/assembler/MacroAssemblerMIPS.h
new file mode 100644
index 0000000..a688e98
--- /dev/null
+++ b/assembler/MacroAssemblerMIPS.h
@@ -0,0 +1,3021 @@
+/*
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AbstractMacroAssembler.h"
+#include "MIPSAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler, MacroAssemblerMIPS> {
+public:
+    typedef MIPSRegisters::FPRegisterID FPRegisterID;
+
+    MacroAssemblerMIPS()
+        : m_fixedWidth(false)
+    {
+    }
+
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value >= -2147483647 - 1 && value <= 2147483647;
+    }
+
+    static const Scale ScalePtr = TimesFour;
+
+    // For storing an immediate number
+    static const RegisterID immTempRegister = MIPSRegisters::t0;
+    // For storing data loaded from memory
+    static const RegisterID dataTempRegister = MIPSRegisters::t1;
+    // For storing an address base
+    static const RegisterID addrTempRegister = MIPSRegisters::t7;
+    // For storing a compare result
+    static const RegisterID cmpTempRegister = MIPSRegisters::t8;
+
+    // FP temp register
+    static const FPRegisterID fpTempRegister = MIPSRegisters::f16;
+
+    static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;
+
+    enum RelationalCondition {
+        Equal,
+        NotEqual,
+        Above,
+        AboveOrEqual,
+        Below,
+        BelowOrEqual,
+        GreaterThan,
+        GreaterThanOrEqual,
+        LessThan,
+        LessThanOrEqual
+    };
+
+    enum ResultCondition {
+        Overflow,
+        Signed,
+        PositiveOrZero,
+        Zero,
+        NonZero
+    };
+
+    enum DoubleCondition {
+        DoubleEqual,
+        DoubleNotEqual,
+        DoubleGreaterThan,
+        DoubleGreaterThanOrEqual,
+        DoubleLessThan,
+        DoubleLessThanOrEqual,
+        DoubleEqualOrUnordered,
+        DoubleNotEqualOrUnordered,
+        DoubleGreaterThanOrUnordered,
+        DoubleGreaterThanOrEqualOrUnordered,
+        DoubleLessThanOrUnordered,
+        DoubleLessThanOrEqualOrUnordered
+    };
+
+    static const RegisterID stackPointerRegister = MIPSRegisters::sp;
+    static const RegisterID framePointerRegister = MIPSRegisters::fp;
+    static const RegisterID returnAddressRegister = MIPSRegisters::ra;
+
+    // Integer arithmetic operations:
+    //
+    // Operations are typically two-operand - operation(source, srcDst).
+    // For many operations the source may be a TrustedImm32, and the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+    // object).
+
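+    // For instance (a sketch; "dest" stands for any general-purpose register):
+    //     add32(TrustedImm32(1), dest);  // dest += 1, emitted as a single addiu when the
+    //                                    // immediate fits in 16 signed bits
+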
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.addu(dest, dest, src);
+    }
+
+    void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.addu(dest, op1, op2);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        add32(imm, dest, dest);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value >= -32768 && imm.m_value <= 32767
+            && !m_fixedWidth) {
+            /*
+              addiu     dest, src, imm
+            */
+            m_assembler.addiu(dest, src, imm.m_value);
+        } else {
+            /*
+              li        immTemp, imm
+              addu      dest, src, immTemp
+            */
+            move(imm, immTempRegister);
+            m_assembler.addu(dest, src, immTempRegister);
+        }
+    }
+
+    void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        add32(imm, src, dest);
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+              lw        dataTemp, offset(base)
+              li        immTemp, imm
+              addu      dataTemp, dataTemp, immTemp
+              sw        dataTemp, offset(base)
+            */
+            m_assembler.lw(dataTempRegister, address.base, address.offset);
+            if (imm.m_value >= -32768 && imm.m_value <= 32767
+                && !m_fixedWidth)
+                m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+            }
+            m_assembler.sw(dataTempRegister, address.base, address.offset);
+        } else {
+            /*
+              lui       addrTemp, (offset + 0x8000) >> 16
+              addu      addrTemp, addrTemp, base
+              lw        dataTemp, (offset & 0xffff)(addrTemp)
+              li        immtemp, imm
+              addu      dataTemp, dataTemp, immTemp
+              sw        dataTemp, (offset & 0xffff)(addrTemp)
+            */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+            if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+                m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+            }
+            m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+        }
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        add32(dataTempRegister, dest);
+    }
+
+    void add32(AbsoluteAddress src, RegisterID dest)
+    {
+        load32(src.m_ptr, dataTempRegister);
+        add32(dataTempRegister, dest);
+    }
+
+    void add32(RegisterID src, Address dest)
+    {
+        if (dest.offset >= -32768 && dest.offset <= 32767 && !m_fixedWidth) {
+            /*
+              lw        dataTemp, offset(base)
+              addu      dataTemp, dataTemp, src
+              sw        dataTemp, offset(base)
+            */
+            m_assembler.lw(dataTempRegister, dest.base, dest.offset);
+            m_assembler.addu(dataTempRegister, dataTempRegister, src);
+            m_assembler.sw(dataTempRegister, dest.base, dest.offset);
+        } else {
+            /*
+              lui       addrTemp, (offset + 0x8000) >> 16
+              addu      addrTemp, addrTemp, base
+              lw        dataTemp, (offset & 0xffff)(addrTemp)
+              addu      dataTemp, dataTemp, src
+              sw        dataTemp, (offset & 0xffff)(addrTemp)
+            */
+            m_assembler.lui(addrTempRegister, (dest.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, dest.base);
+            m_assembler.lw(dataTempRegister, addrTempRegister, dest.offset);
+            m_assembler.addu(dataTempRegister, dataTempRegister, src);
+            m_assembler.sw(dataTempRegister, addrTempRegister, dest.offset);
+        }
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        /*
+           li   addrTemp, address
+           li   immTemp, imm
+           lw   cmpTemp, 0(addrTemp)
+           addu dataTemp, cmpTemp, immTemp
+           sw   dataTemp, 0(addrTemp)
+        */
+        move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+        m_assembler.lw(cmpTempRegister, addrTempRegister, 0);
+        if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+            m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value);
+        else {
+            move(imm, immTempRegister);
+            m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister);
+        }
+        m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+    }
+
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        /*
+            add32(imm, address)
+            sltu  immTemp, dataTemp, cmpTemp    # set carry-in bit
+            lw    dataTemp, 4(addrTemp)
+            addiu dataTemp, imm.m_value >> 31 ? -1 : 0
+            addu  dataTemp, dataTemp, immTemp
+            sw    dataTemp, 4(addrTemp)
+        */
+        add32(imm, address);
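+        // add32() above leaves the updated low word in dataTemp and the original
+        // low word in cmpTemp, so sltu yields 1 exactly when the 32-bit addition
+        // wrapped around, i.e. the carry into the high word.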
+        m_assembler.sltu(immTempRegister, dataTempRegister, cmpTempRegister);
+        m_assembler.lw(dataTempRegister, addrTempRegister, 4);
+        if (imm.m_value >> 31)
+            m_assembler.addiu(dataTempRegister, dataTempRegister, -1);
+        m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+        m_assembler.sw(dataTempRegister, addrTempRegister, 4);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        and32(dataTempRegister, dest);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.andInsn(dest, dest, src);
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.andInsn(dest, op1, op2);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            move(MIPSRegisters::zero, dest);
+        else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth)
+            m_assembler.andi(dest, dest, imm.m_value);
+        else {
+            /*
+              li        immTemp, imm
+              and       dest, dest, immTemp
+            */
+            move(imm, immTempRegister);
+            m_assembler.andInsn(dest, dest, immTempRegister);
+        }
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            move(MIPSRegisters::zero, dest);
+        else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth)
+            m_assembler.andi(dest, src, imm.m_value);
+        else {
+            move(imm, immTempRegister);
+            m_assembler.andInsn(dest, src, immTempRegister);
+        }
+    }
+
+    void countLeadingZeros32(RegisterID src, RegisterID dest)
+    {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+        m_assembler.clz(dest, src);
+#else
+        static_assert(false, "CLZ opcode is not available for this ISA");
+#endif
+    }
+
+    void lshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.sllv(dest, dest, shiftAmount);
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.sllv(dest, src, shiftAmount);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        m_assembler.sllv(dest, dest, immTempRegister);
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        m_assembler.sllv(dest, src, immTempRegister);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mul(dest, dest, src);
+    }
+
+    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.mul(dest, op1, op2);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            move(MIPSRegisters::zero, dest);
+        else if (imm.m_value == 1 && !m_fixedWidth)
+            move(src, dest);
+        else {
+            /*
+                li      dataTemp, imm
+                mul     dest, src, dataTemp
+            */
+            move(imm, dataTempRegister);
+            m_assembler.mul(dest, src, dataTempRegister);
+        }
+    }
+
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orInsn(dest, dest, src);
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orInsn(dest, op1, op2);
+    }
+
+    void or32(TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            return;
+
+        // TODO: Swap dataTempRegister and immTempRegister usage
+        load32(dest.m_ptr, immTempRegister);
+        or32(imm, immTempRegister);
+        store32(immTempRegister, dest.m_ptr);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            return;
+
+        if (imm.m_value > 0 && imm.m_value <= 65535
+            && !m_fixedWidth) {
+            m_assembler.ori(dest, dest, imm.m_value);
+            return;
+        }
+
+        /*
+            li      dataTemp, imm
+            or      dest, dest, dataTemp
+        */
+        move(imm, dataTempRegister);
+        m_assembler.orInsn(dest, dest, dataTempRegister);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (!imm.m_value && !m_fixedWidth) {
+            move(src, dest);
+            return;
+        }
+
+        if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) {
+            m_assembler.ori(dest, src, imm.m_value);
+            return;
+        }
+
+        /*
+            li      dataTemp, imm
+            or      dest, src, dataTemp
+        */
+        move(imm, dataTempRegister);
+        m_assembler.orInsn(dest, src, dataTempRegister);
+    }
+
+    void or32(RegisterID src, AbsoluteAddress dest)
+    {
+        load32(dest.m_ptr, dataTempRegister);
+        m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
+        store32(dataTempRegister, dest.m_ptr);
+    }
+
+    void rshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.srav(dest, dest, shiftAmount);
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.srav(dest, src, shiftAmount);
+    }
+
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.sra(dest, dest, imm.m_value);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.sra(dest, src, imm.m_value);
+    }
+
+    void urshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.srlv(dest, dest, shiftAmount);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        m_assembler.srlv(dest, src, shiftAmount);
+    }
+
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.srl(dest, dest, imm.m_value);
+    }
+
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.srl(dest, src, imm.m_value);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.subu(dest, dest, src);
+    }
+
+    void sub32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.subu(dest, op1, op2);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value >= -32767 && imm.m_value <= 32768
+            && !m_fixedWidth) {
+            /*
+              addiu     dest, dest, -imm
+            */
+            m_assembler.addiu(dest, dest, -imm.m_value);
+        } else {
+            /*
+              li        immTemp, imm
+              subu      dest, dest, immTemp
+            */
+            move(imm, immTempRegister);
+            m_assembler.subu(dest, dest, immTempRegister);
+        }
+    }
+
+    void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value >= -32767 && imm.m_value <= 32768
+            && !m_fixedWidth) {
+            /*
+              addiu     dest, src, -imm
+            */
+            m_assembler.addiu(dest, src, -imm.m_value);
+        } else {
+            /*
+              li        immTemp, imm
+              subu      dest, src, immTemp
+            */
+            move(imm, immTempRegister);
+            m_assembler.subu(dest, src, immTempRegister);
+        }
+    }
+
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+              lw        dataTemp, offset(base)
+              li        immTemp, imm
+              subu      dataTemp, dataTemp, immTemp
+              sw        dataTemp, offset(base)
+            */
+            m_assembler.lw(dataTempRegister, address.base, address.offset);
+            if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+                m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+            }
+            m_assembler.sw(dataTempRegister, address.base, address.offset);
+        } else {
+            /*
+              lui       addrTemp, (offset + 0x8000) >> 16
+              addu      addrTemp, addrTemp, base
+              lw        dataTemp, (offset & 0xffff)(addrTemp)
+              li        immtemp, imm
+              subu      dataTemp, dataTemp, immTemp
+              sw        dataTemp, (offset & 0xffff)(addrTemp)
+            */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+            if (imm.m_value >= -32767 && imm.m_value <= 32768
+                && !m_fixedWidth)
+                m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+            }
+            m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+        }
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        sub32(dataTempRegister, dest);
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        /*
+           li   addrTemp, address
+           li   immTemp, imm
+           lw   dataTemp, 0(addrTemp)
+           subu dataTemp, dataTemp, immTemp
+           sw   dataTemp, 0(addrTemp)
+        */
+        move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+        m_assembler.lw(dataTempRegister, addrTempRegister, 0);
+
+        if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+            m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+        else {
+            move(imm, immTempRegister);
+            m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+        }
+        m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xorInsn(dest, dest, src);
+    }
+
+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.xorInsn(dest, op1, op2);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == -1) {
+            m_assembler.nor(dest, dest, MIPSRegisters::zero);
+            return;
+        }
+
+        /*
+            li  immTemp, imm
+            xor dest, dest, immTemp
+        */
+        move(imm, immTempRegister);
+        m_assembler.xorInsn(dest, dest, immTempRegister);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (imm.m_value == -1) {
+            m_assembler.nor(dest, src, MIPSRegisters::zero);
+            return;
+        }
+
+        /*
+            li  immTemp, imm
+            xor dest, dest, immTemp
+        */
+        move(imm, immTempRegister);
+        m_assembler.xorInsn(dest, src, immTempRegister);
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.sqrtd(dst, src);
+    }
+
+    void absDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.absd(dst, src);
+    }
+
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result(this);
+        /*
+            lui     addrTemp, (offset + 0x8000) >> 16
+            addu    addrTemp, addrTemp, base
+            lw      dest, (offset & 0xffff)(addrTemp)
+        */
+        m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+        m_assembler.lw(dest, addrTempRegister, address.offset);
+        return result;
+    }
+
+    // Memory access operations:
+    //
+    // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address). The source for a store may be a TrustedImm32. Address
+    // operand objects to loads and stores will be implicitly constructed if a
+    // register is passed.
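+    //
+    // A minimal usage sketch (illustrative only; masm, baseReg and destReg are
+    // hypothetical caller-side names):
+    //
+    //     masm.load32(Address(baseReg, 16), destReg);          // destReg = [baseReg + 16]
+    //     masm.store32(TrustedImm32(0), Address(baseReg, 16)); // [baseReg + 16] = 0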
+
+    /* Need to use a zero-extended load byte for load8.  */
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth)
+            m_assembler.lbu(dest, address.base, address.offset);
+        else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                lbu     dest, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lbu(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+             sll     addrTemp, address.index, address.scale
+             addu    addrTemp, addrTemp, address.base
+             lbu     dest, address.offset(addrTemp)
+             */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lbu(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+             sll     addrTemp, address.index, address.scale
+             addu    addrTemp, addrTemp, address.base
+             lui     immTemp, (address.offset + 0x8000) >> 16
+             addu    addrTemp, addrTemp, immTemp
+             lbu     dest, (address.offset & 0xffff)(at)
+             */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lbu(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    ALWAYS_INLINE void load8(AbsoluteAddress address, RegisterID dest)
+    {
+        load8(address.m_ptr, dest);
+    }
+
+    void load8(const void* address, RegisterID dest)
+    {
+        /*
+            li  addrTemp, address
+            lbu dest, 0(addrTemp)
+        */
+        move(TrustedImmPtr(address), addrTempRegister);
+        m_assembler.lbu(dest, addrTempRegister, 0);
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lb      dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lb(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                lb     dest, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lb(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth)
+            m_assembler.lw(dest, address.base, address.offset);
+        else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                lw      dest, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lw(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lw      dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lw(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                lw      dest, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lw(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) {
+            /*
+                sll     addrtemp, address.index, address.scale
+                addu    addrtemp, addrtemp, address.base
+                lbu     immTemp, address.offset+x(addrtemp) (x=0 for LE, x=1 for BE)
+                lbu     dest, address.offset+x(addrtemp)    (x=1 for LE, x=0 for BE)
+                sll     dest, dest, 8
+                or      dest, dest, immTemp
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+            m_assembler.lbu(immTempRegister, addrTempRegister, address.offset + 1);
+            m_assembler.lbu(dest, addrTempRegister, address.offset);
+#else
+            m_assembler.lbu(immTempRegister, addrTempRegister, address.offset);
+            m_assembler.lbu(dest, addrTempRegister, address.offset + 1);
+#endif
+            m_assembler.sll(dest, dest, 8);
+            m_assembler.orInsn(dest, dest, immTempRegister);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, address.offset >> 16
+                ori     immTemp, immTemp, address.offset & 0xffff
+                addu    addrTemp, addrTemp, immTemp
+                lbu     immTemp, x(addrtemp) (x=0 for LE, x=1 for BE)
+                lbu     dest, x(addrtemp)    (x=1 for LE, x=0 for BE)
+                sll     dest, dest, 8
+                or      dest, dest, immTemp
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, address.offset >> 16);
+            m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+#if CPU(BIG_ENDIAN)
+            m_assembler.lbu(immTempRegister, addrTempRegister, 1);
+            m_assembler.lbu(dest, addrTempRegister, 0);
+#else
+            m_assembler.lbu(immTempRegister, addrTempRegister, 0);
+            m_assembler.lbu(dest, addrTempRegister, 1);
+#endif
+            m_assembler.sll(dest, dest, 8);
+            m_assembler.orInsn(dest, dest, immTempRegister);
+        }
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32764
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                (Big-Endian)
+                lwl     dest, address.offset(addrTemp)
+                lwr     dest, address.offset+3(addrTemp)
+                (Little-Endian)
+                lwl     dest, address.offset+3(addrTemp)
+                lwr     dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+            m_assembler.lwl(dest, addrTempRegister, address.offset);
+            m_assembler.lwr(dest, addrTempRegister, address.offset + 3);
+#else
+            m_assembler.lwl(dest, addrTempRegister, address.offset + 3);
+            m_assembler.lwr(dest, addrTempRegister, address.offset);
+
+#endif
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, address.offset >> 16
+                ori     immTemp, immTemp, address.offset & 0xffff
+                addu    addrTemp, addrTemp, immTemp
+                (Big-Endian)
+                lwl     dest, 0(at)
+                lwr     dest, 3(at)
+                (Little-Endian)
+                lwl     dest, 3(at)
+                lwr     dest, 0(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, address.offset >> 16);
+            m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+#if CPU(BIG_ENDIAN)
+            m_assembler.lwl(dest, addrTempRegister, 0);
+            m_assembler.lwr(dest, addrTempRegister, 3);
+#else
+            m_assembler.lwl(dest, addrTempRegister, 3);
+            m_assembler.lwr(dest, addrTempRegister, 0);
+#endif
+        }
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        /*
+            li  addrTemp, address
+            lw  dest, 0(addrTemp)
+        */
+        move(TrustedImmPtr(address), addrTempRegister);
+        m_assembler.lw(dest, addrTempRegister, 0);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
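+        // Force the fixed-width lui/ori form of move() so the 32-bit offset
+        // recorded at dataLabel below can be repatched later.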
+        m_fixedWidth = true;
+        /*
+            lui addrTemp, address.offset >> 16
+            ori addrTemp, addrTemp, address.offset & 0xffff
+            addu        addrTemp, addrTemp, address.base
+            lw  dest, 0(addrTemp)
+        */
+        DataLabel32 dataLabel(this);
+        move(TrustedImm32(address.offset), addrTempRegister);
+        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+        m_assembler.lw(dest, addrTempRegister, 0);
+        m_fixedWidth = false;
+        return dataLabel;
+    }
+    
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabelCompact dataLabel(this);
+        load32WithAddressOffsetPatch(address, dest);
+        return dataLabel;
+    }
+
+    /* Need to use a zero-extended load half-word for load16.  */
+    void load16(ImplicitAddress address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth)
+            m_assembler.lhu(dest, address.base, address.offset);
+        else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                lhu     dest, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lhu(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    /* Need to use a zero-extended load half-word for load16.  */
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lhu     dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lhu(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                lhu     dest, (address.offset & 0xffff)(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lhu(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lh     dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lh(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                lh     dest, (address.offset & 0xffff)(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lh(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        m_fixedWidth = true;
+        /*
+            lui addrTemp, address.offset >> 16
+            ori addrTemp, addrTemp, address.offset & 0xffff
+            addu        addrTemp, addrTemp, address.base
+            sw  src, 0(addrTemp)
+        */
+        DataLabel32 dataLabel(this);
+        move(TrustedImm32(address.offset), addrTempRegister);
+        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+        m_assembler.sw(src, addrTempRegister, 0);
+        m_fixedWidth = false;
+        return dataLabel;
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                sb      src, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.sb(src, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                sb      src, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.sb(src, addrTempRegister, address.offset);
+        }
+    }
+
+    void store8(RegisterID src, void* address)
+    {
+        move(TrustedImmPtr(address), addrTempRegister);
+        m_assembler.sb(src, addrTempRegister, 0);
+    }
+
+    void store8(TrustedImm32 imm, void* address)
+    {
+        /*
+            li  immTemp, imm
+            li  addrTemp, address
+            sb  src, 0(addrTemp)
+        */
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (!imm8.m_value && !m_fixedWidth) {
+            move(TrustedImmPtr(address), addrTempRegister);
+            m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0);
+        } else {
+            move(imm8, immTempRegister);
+            move(TrustedImmPtr(address), addrTempRegister);
+            m_assembler.sb(immTempRegister, addrTempRegister, 0);
+        }
+    }
+
+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            if (!imm8.m_value)
+                m_assembler.sb(MIPSRegisters::zero, address.base, address.offset);
+            else {
+                move(imm8, immTempRegister);
+                m_assembler.sb(immTempRegister, address.base, address.offset);
+            }
+        } else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                sb      immTemp, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            if (!imm8.m_value && !m_fixedWidth)
+                m_assembler.sb(MIPSRegisters::zero, addrTempRegister, address.offset);
+            else {
+                move(imm8, immTempRegister);
+                m_assembler.sb(immTempRegister, addrTempRegister, address.offset);
+            }
+        }
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                sh      src, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.sh(src, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                sh      src, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.sh(src, addrTempRegister, address.offset);
+        }
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth)
+            m_assembler.sw(src, address.base, address.offset);
+        else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                sw      src, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.sw(src, addrTempRegister, address.offset);
+        }
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                sw      src, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.sw(src, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                sw      src, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.sw(src, addrTempRegister, address.offset);
+        }
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            if (!imm.m_value)
+                m_assembler.sw(MIPSRegisters::zero, address.base, address.offset);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.sw(immTempRegister, address.base, address.offset);
+            }
+        } else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                sw      immTemp, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            if (!imm.m_value && !m_fixedWidth)
+                m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+            }
+        }
+    }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                sw      src, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            if (!imm.m_value)
+                m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+            }
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                sw      src, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            if (!imm.m_value && !m_fixedWidth)
+                m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+            else {
+                move(imm, immTempRegister);
+                m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+            }
+        }
+    }
+
+    void store32(RegisterID src, const void* address)
+    {
+        /*
+            li  addrTemp, address
+            sw  src, 0(addrTemp)
+        */
+        move(TrustedImmPtr(address), addrTempRegister);
+        m_assembler.sw(src, addrTempRegister, 0);
+    }
+
+    void store32(TrustedImm32 imm, const void* address)
+    {
+        /*
+            li  immTemp, imm
+            li  addrTemp, address
+            sw  src, 0(addrTemp)
+        */
+        if (!imm.m_value && !m_fixedWidth) {
+            move(TrustedImmPtr(address), addrTempRegister);
+            m_assembler.sw(MIPSRegisters::zero, addrTempRegister, 0);
+        } else {
+            move(imm, immTempRegister);
+            move(TrustedImmPtr(address), addrTempRegister);
+            m_assembler.sw(immTempRegister, addrTempRegister, 0);
+        }
+    }
+
+    // Floating-point operations:
+
+    static bool supportsFloatingPoint()
+    {
+#if WTF_MIPS_DOUBLE_FLOAT
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    static bool supportsFloatingPointTruncate()
+    {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    static bool supportsFloatingPointSqrt()
+    {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    static bool supportsFloatingPointAbs()
+    {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    static bool supportsFloatingPointRounding() { return false; }
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data. Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack. Peek and poke operations read or write
+    // values on the stack, without moving the current stack position.
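+    //
+    // For example (illustrative; masm, regA and regB are hypothetical names):
+    //
+    //     masm.push(regA); // sp -= 4; [sp] = regA
+    //     masm.pop(regB);  // regB = [sp]; sp += 4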
+
+    void pop(RegisterID dest)
+    {
+        m_assembler.lw(dest, MIPSRegisters::sp, 0);
+        m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4);
+    }
+
+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.lw(dest1, MIPSRegisters::sp, 0);
+        m_assembler.lw(dest2, MIPSRegisters::sp, 4);
+        m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 8);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4);
+        m_assembler.sw(src, MIPSRegisters::sp, 0);
+    }
+
+    void push(Address address)
+    {
+        load32(address, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        move(imm, immTempRegister);
+        push(immTempRegister);
+    }
+
+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -8);
+        m_assembler.sw(src2, MIPSRegisters::sp, 4);
+        m_assembler.sw(src1, MIPSRegisters::sp, 0);
+    }
+
+    // Register move operations:
+    //
+    // Move values in registers.
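+    //
+    // For example (illustrative; masm, regA and regB are hypothetical names):
+    //
+    //     masm.move(TrustedImm32(42), regA); // regA = 42
+    //     masm.move(regA, regB);             // regB = regA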
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value && !m_fixedWidth)
+            move(MIPSRegisters::zero, dest);
+        else if (m_fixedWidth) {
+            m_assembler.lui(dest, imm.m_value >> 16);
+            m_assembler.ori(dest, dest, imm.m_value);
+        } else
+            m_assembler.li(dest, imm.m_value);
+    }
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest || m_fixedWidth)
+            m_assembler.move(dest, src);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        move(TrustedImm32(imm), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        move(reg1, immTempRegister);
+        move(reg2, reg1);
+        move(immTempRegister, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest || m_fixedWidth)
+            move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest || m_fixedWidth)
+            move(src, dest);
+    }
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
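+    //
+    // A minimal usage sketch (illustrative; masm and the register names are
+    // hypothetical, and Jump::link() is the shared AbstractMacroAssembler
+    // helper):
+    //
+    //     Jump overLimit = masm.branch32(GreaterThan, regA, TrustedImm32(5));
+    //     // ... code emitted here runs when regA <= 5 ...
+    //     overLimit.link(&masm); // bind the branch to the current location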
+
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        move(right8, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        move(right8, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        move(right8, immTempRegister);
+        compare32(cond, dataTempRegister, immTempRegister, dest);
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister);
+        // The load8() above may clobber immTempRegister, so the move() of
+        // right8 must come after it.
+        move(right8, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        if (cond == Equal)
+            return branchEqual(left, right);
+        if (cond == NotEqual)
+            return branchNotEqual(left, right);
+        if (cond == Above) {
+            m_assembler.sltu(cmpTempRegister, right, left);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == AboveOrEqual) {
+            m_assembler.sltu(cmpTempRegister, left, right);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Below) {
+            m_assembler.sltu(cmpTempRegister, left, right);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == BelowOrEqual) {
+            m_assembler.sltu(cmpTempRegister, right, left);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == GreaterThan) {
+            m_assembler.slt(cmpTempRegister, right, left);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == GreaterThanOrEqual) {
+            m_assembler.slt(cmpTempRegister, left, right);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == LessThan) {
+            m_assembler.slt(cmpTempRegister, left, right);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == LessThanOrEqual) {
+            m_assembler.slt(cmpTempRegister, right, left);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+
+        return Jump();
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        move(right, immTempRegister);
+        return branch32(cond, left, immTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load32(right, dataTempRegister);
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load32(left, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        load32(left, dataTempRegister);
+        move(right, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32(left, dataTempRegister);
+        // The load32() above may clobber immTempRegister, so the move() of
+        // right must come after it.
+        move(right, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32WithUnalignedHalfWords(left, dataTempRegister);
+        // The load32WithUnalignedHalfWords() above may clobber immTempRegister,
+        // so the move() of right must come after it.
+        move(right, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        load32(left.m_ptr, dataTempRegister);
+        move(right, immTempRegister);
+        return branch32(cond, dataTempRegister, immTempRegister);
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
+        m_assembler.andInsn(cmpTempRegister, reg, mask);
+        switch (cond) {
+        case Zero:
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        case NonZero:
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        case Signed:
+            m_assembler.slt(cmpTempRegister, cmpTempRegister, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed));
+        if (mask.m_value == -1 && !m_fixedWidth) {
+            switch (cond) {
+            case Zero:
+                return branchEqual(reg, MIPSRegisters::zero);
+            case NonZero:
+                return branchNotEqual(reg, MIPSRegisters::zero);
+            case Signed:
+                m_assembler.slt(cmpTempRegister, reg, MIPSRegisters::zero);
+                return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+        move(mask, immTempRegister);
+        return branchTest32(cond, reg, immTempRegister);
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load32(address, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(dataTempRegister), dataTempRegister);
+        return branchTest32(cond, dataTempRegister, mask8);
+    }
+
+    Jump jump()
+    {
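+        // An unconditional jump is emitted as a branch on the always-true
+        // condition zero == zero.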
+        return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero);
+    }
+
+    void jump(RegisterID target)
+    {
+        move(target, MIPSRegisters::t9);
+        m_assembler.jr(MIPSRegisters::t9);
+        m_assembler.nop();
+    }
+
+    void jump(Address address)
+    {
+        m_fixedWidth = true;
+        load32(address, MIPSRegisters::t9);
+        m_assembler.jr(MIPSRegisters::t9);
+        m_assembler.nop();
+        m_fixedWidth = false;
+    }
+
+    void jump(AbsoluteAddress address)
+    {
+        m_fixedWidth = true;
+        load32(address.m_ptr, MIPSRegisters::t9);
+        m_assembler.jr(MIPSRegisters::t9);
+        m_assembler.nop();
+        m_fixedWidth = false;
+    }
+
+    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.vmov(dest1, dest2, src);
+    }
+
+    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+    {
+        UNUSED_PARAM(scratch);
+        m_assembler.vmov(dest, src1, src2);
+    }
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation. The operation
+    // is performed as normal, storing the result.
+    //
+    // * jz operations branch if the result is zero.
+    // * jo operations branch if the (signed) arithmetic
+    //   operation caused an overflow to occur.
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                move    dest, dataTemp
+                xor     cmpTemp, dataTemp, src
+                bltz    cmpTemp, No_overflow    # diff sign bit -> no overflow
+                addu    dest, dataTemp, src
+                xor     cmpTemp, dest, dataTemp
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            move(dest, dataTempRegister);
+            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+            m_assembler.bltz(cmpTempRegister, 10);
+            m_assembler.addu(dest, dataTempRegister, src);
+            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+            m_assembler.bgez(cmpTempRegister, 7);
+            m_assembler.nop();
+            return jump();
+        }
+        if (cond == Signed) {
+            add32(src, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == PositiveOrZero) {
+            add32(src, dest);
+            // Check if dest is not negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            add32(src, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            add32(src, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                move    dataTemp, op1
+                xor     cmpTemp, dataTemp, op2
+                bltz    cmpTemp, No_overflow    # diff sign bit -> no overflow
+                addu    dest, dataTemp, op2
+                xor     cmpTemp, dest, dataTemp
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            move(op1, dataTempRegister);
+            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+            m_assembler.bltz(cmpTempRegister, 10);
+            m_assembler.addu(dest, dataTempRegister, op2);
+            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+            m_assembler.bgez(cmpTempRegister, 7);
+            m_assembler.nop();
+            return jump();
+        }
+        if (cond == Signed) {
+            add32(op1, op2, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == PositiveOrZero) {
+            add32(op1, op2, dest);
+            // Check if dest is not negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            add32(op1, op2, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            add32(op1, op2, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        return branchAdd32(cond, immTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address address, RegisterID dest)
+    {
+        load32(address, immTempRegister);
+        return branchAdd32(cond, immTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        move(src, dest);
+        return branchAdd32(cond, immTempRegister, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                move    dataTemp, dest
+                xori    cmpTemp, dataTemp, imm
+                bltz    cmpTemp, No_overflow    # diff sign bit -> no overflow
+                addiu   dataTemp, dataTemp, imm
+                move    dest, dataTemp
+                xori    cmpTemp, dataTemp, imm
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth) {
+                load32(dest.m_ptr, dataTempRegister);
+                m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+                m_assembler.bltz(cmpTempRegister, 10);
+                m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+                store32(dataTempRegister, dest.m_ptr);
+                m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+                m_assembler.bgez(cmpTempRegister, 7);
+                m_assembler.nop();
+            } else {
+                load32(dest.m_ptr, dataTempRegister);
+                move(imm, immTempRegister);
+                m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+                m_assembler.bltz(cmpTempRegister, 10);
+                m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+                store32(dataTempRegister, dest.m_ptr);
+                m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+                m_assembler.bgez(cmpTempRegister, 7);
+                m_assembler.nop();
+            }
+            return jump();
+        }
+        move(imm, immTempRegister);
+        load32(dest.m_ptr, dataTempRegister);
+        add32(immTempRegister, dataTempRegister);
+        store32(dataTempRegister, dest.m_ptr);
+        if (cond == Signed) {
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == PositiveOrZero) {
+            // Check if dest is not negative.
+            m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
+            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero)
+            return branchEqual(dataTempRegister, MIPSRegisters::zero);
+        if (cond == NonZero)
+            return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                mult    src, dest
+                mfhi    dataTemp
+                mflo    dest
+                sra     addrTemp, dest, 31
+                beq     dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            m_assembler.mult(src1, src2);
+            m_assembler.mfhi(dataTempRegister);
+            m_assembler.mflo(dest);
+            m_assembler.sra(addrTempRegister, dest, 31);
+            m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+            m_assembler.nop();
+            return jump();
+        }
+        if (cond == Signed) {
+            mul32(src1, src2, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            mul32(src1, src2, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            mul32(src1, src2, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                mult    src, dest
+                mfhi    dataTemp
+                mflo    dest
+                sra     addrTemp, dest, 31
+                beq     dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            m_assembler.mult(src, dest);
+            m_assembler.mfhi(dataTempRegister);
+            m_assembler.mflo(dest);
+            m_assembler.sra(addrTempRegister, dest, 31);
+            m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+            m_assembler.nop();
+            return jump();
+        }
+        if (cond == Signed) {
+            mul32(src, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            mul32(src, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            mul32(src, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        return branchMul32(cond, immTempRegister, src, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                move    dest, dataTemp
+                xor     cmpTemp, dataTemp, src
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                subu    dest, dataTemp, src
+                xor     cmpTemp, dest, dataTemp
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            move(dest, dataTempRegister);
+            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+            m_assembler.bgez(cmpTempRegister, 10);
+            m_assembler.subu(dest, dataTempRegister, src);
+            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+            m_assembler.bgez(cmpTempRegister, 7);
+            m_assembler.nop();
+            return jump();
+        }
+        if (cond == Signed) {
+            sub32(src, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            sub32(src, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            sub32(src, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        return branchSub32(cond, immTempRegister, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(imm, immTempRegister);
+        return branchSub32(cond, src, immTempRegister, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            /*
+                move    dataTemp, op1
+                xor     cmpTemp, dataTemp, op2
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                subu    dest, dataTemp, op2
+                xor     cmpTemp, dest, dataTemp
+                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
+                nop
+                b       Overflow
+                nop
+                nop
+                nop
+                nop
+                nop
+            No_overflow:
+            */
+            move(op1, dataTempRegister);
+            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+            m_assembler.bgez(cmpTempRegister, 10);
+            m_assembler.subu(dest, dataTempRegister, op2);
+            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+            m_assembler.bgez(cmpTempRegister, 7);
+            m_assembler.nop();
+            return jump();
+        }
+        if (cond == Signed) {
+            sub32(op1, op2, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            sub32(op1, op2, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            sub32(op1, op2, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+    {
+        m_assembler.li(dataTempRegister, -1);
+        return branchMul32(cond, dataTempRegister, srcDest);
+    }
+
+    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Signed) {
+            or32(src, dest);
+            // Check if dest is negative.
+            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+        }
+        if (cond == Zero) {
+            or32(src, dest);
+            return branchEqual(dest, MIPSRegisters::zero);
+        }
+        if (cond == NonZero) {
+            or32(src, dest);
+            return branchNotEqual(dest, MIPSRegisters::zero);
+        }
+        ASSERT(0);
+        return Jump();
+    }
+
+    // Miscellaneous operations:
+
+    void breakpoint()
+    {
+        m_assembler.bkpt();
+    }
+
+    Call nearCall()
+    {
+        /* We need two words for relaxation. */
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.jal();
+        m_assembler.nop();
+        return Call(m_assembler.label(), Call::LinkableNear);
+    }
+
+    Call nearTailCall()
+    {
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 0);
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Call(m_assembler.label(), Call::LinkableNearTail);
+    }
+
+    Call call()
+    {
+        m_assembler.lui(MIPSRegisters::t9, 0);
+        m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0);
+        m_assembler.jalr(MIPSRegisters::t9);
+        m_assembler.nop();
+        return Call(m_assembler.label(), Call::Linkable);
+    }
+
+    Call call(RegisterID target)
+    {
+        move(target, MIPSRegisters::t9);
+        m_assembler.jalr(MIPSRegisters::t9);
+        m_assembler.nop();
+        return Call(m_assembler.label(), Call::None);
+    }
+
+    Call call(Address address)
+    {
+        m_fixedWidth = true;
+        load32(address, MIPSRegisters::t9);
+        m_assembler.jalr(MIPSRegisters::t9);
+        m_assembler.nop();
+        m_fixedWidth = false;
+        return Call(m_assembler.label(), Call::None);
+    }
+
+    void ret()
+    {
+        m_assembler.jr(MIPSRegisters::ra);
+        m_assembler.nop();
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        if (cond == Equal) {
+            m_assembler.xorInsn(dest, left, right);
+            m_assembler.sltiu(dest, dest, 1);
+        } else if (cond == NotEqual) {
+            m_assembler.xorInsn(dest, left, right);
+            m_assembler.sltu(dest, MIPSRegisters::zero, dest);
+        } else if (cond == Above)
+            m_assembler.sltu(dest, right, left);
+        else if (cond == AboveOrEqual) {
+            m_assembler.sltu(dest, left, right);
+            m_assembler.xori(dest, dest, 1);
+        } else if (cond == Below)
+            m_assembler.sltu(dest, left, right);
+        else if (cond == BelowOrEqual) {
+            m_assembler.sltu(dest, right, left);
+            m_assembler.xori(dest, dest, 1);
+        } else if (cond == GreaterThan)
+            m_assembler.slt(dest, right, left);
+        else if (cond == GreaterThanOrEqual) {
+            m_assembler.slt(dest, left, right);
+            m_assembler.xori(dest, dest, 1);
+        } else if (cond == LessThan)
+            m_assembler.slt(dest, left, right);
+        else if (cond == LessThanOrEqual) {
+            m_assembler.slt(dest, right, left);
+            m_assembler.xori(dest, dest, 1);
+        }
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        move(right, immTempRegister);
+        compare32(cond, left, immTempRegister, dest);
+    }
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
+        if ((mask8.m_value & 0xff) == 0xff && !m_fixedWidth) {
+            if (cond == Zero)
+                m_assembler.sltiu(dest, dataTempRegister, 1);
+            else
+                m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+        } else {
+            move(mask8, immTempRegister);
+            m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+            if (cond == Zero)
+                m_assembler.sltiu(dest, cmpTempRegister, 1);
+            else
+                m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+        }
+    }
+
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        load32(address, dataTempRegister);
+        if (mask.m_value == -1 && !m_fixedWidth) {
+            if (cond == Zero)
+                m_assembler.sltiu(dest, dataTempRegister, 1);
+            else
+                m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+        } else {
+            move(mask, immTempRegister);
+            m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+            if (cond == Zero)
+                m_assembler.sltiu(dest, cmpTempRegister, 1);
+            else
+                m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+        }
+    }
+
+    DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
+    {
+        m_fixedWidth = true;
+        DataLabel32 label(this);
+        move(imm, dest);
+        m_fixedWidth = false;
+        return label;
+    }
+
+    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+    {
+        m_fixedWidth = true;
+        DataLabelPtr label(this);
+        move(initialValue, dest);
+        m_fixedWidth = false;
+        return label;
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        m_fixedWidth = true;
+        dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+        Jump temp = branch32(cond, left, immTempRegister);
+        m_fixedWidth = false;
+        return temp;
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        m_fixedWidth = true;
+        load32(left, dataTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+        Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+        m_fixedWidth = false;
+        return temp;
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        m_fixedWidth = true;
+        load32(left, dataTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+        Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+        m_fixedWidth = false;
+        return temp;
+    }
+
+    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        m_fixedWidth = true;
+        DataLabelPtr dataLabel = moveWithPatch(initialValue, dataTempRegister);
+        store32(dataTempRegister, address);
+        m_fixedWidth = false;
+        return dataLabel;
+    }
+
+    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+    {
+        return storePtrWithPatch(TrustedImmPtr(0), address);
+    }
+
+    Call tailRecursiveCall()
+    {
+        // Like a normal call, but don't update the return address register.
+        m_fixedWidth = true;
+        move(TrustedImm32(0), MIPSRegisters::t9);
+        m_assembler.jr(MIPSRegisters::t9);
+        m_assembler.nop();
+        m_fixedWidth = false;
+        return Call(m_assembler.label(), Call::Linkable);
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        return tailRecursiveCall();
+    }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lwc1    dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lwc1(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                lwc1    dest, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lwc1(dest, addrTempRegister, address.offset);
+        }
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+#if WTF_MIPS_ISA(1)
+        /*
+            li          addrTemp, address.offset
+            addu        addrTemp, addrTemp, base
+            lwc1        dest, 0(addrTemp)
+            lwc1        dest+1, 4(addrTemp)
+         */
+        move(TrustedImm32(address.offset), addrTempRegister);
+        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+        m_assembler.lwc1(dest, addrTempRegister, 0);
+        m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            m_assembler.ldc1(dest, address.base, address.offset);
+        } else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                ldc1    dest, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.ldc1(dest, addrTempRegister, address.offset);
+        }
+#endif
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+#if WTF_MIPS_ISA(1)
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lwc1    dest, address.offset(addrTemp)
+                lwc1    dest+1, (address.offset+4)(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lwc1(dest, addrTempRegister, address.offset);
+            m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                lwc1    dest, (address.offset & 0xffff)(at)
+                lwc1    dest+1, (address.offset & 0xffff + 4)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.lwc1(dest, addrTempRegister, address.offset);
+            m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+        }
+#else
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                ldc1    dest, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.ldc1(dest, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                ldc1    dest, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.ldc1(dest, addrTempRegister, address.offset);
+        }
+#endif
+    }
+
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+#if WTF_MIPS_ISA(1)
+        /*
+            li          addrTemp, address
+            lwc1        dest, 0(addrTemp)
+            lwc1        dest+1, 4(addrTemp)
+         */
+        move(address, addrTempRegister);
+        m_assembler.lwc1(dest, addrTempRegister, 0);
+        m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+        /*
+            li          addrTemp, address
+            ldc1        dest, 0(addrTemp)
+        */
+        move(address, addrTempRegister);
+        m_assembler.ldc1(dest, addrTempRegister, 0);
+#endif
+    }
+
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                swc1    src, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.swc1(src, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                swc1    src, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.swc1(src, addrTempRegister, address.offset);
+        }
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+#if WTF_MIPS_ISA(1)
+        /*
+            li          addrTemp, address.offset
+            addu        addrTemp, addrTemp, base
+            swc1        dest, 0(addrTemp)
+            swc1        dest+1, 4(addrTemp)
+         */
+        move(TrustedImm32(address.offset), addrTempRegister);
+        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+        m_assembler.swc1(src, addrTempRegister, 0);
+        m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth)
+            m_assembler.sdc1(src, address.base, address.offset);
+        else {
+            /*
+                lui     addrTemp, (offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, base
+                sdc1    src, (offset & 0xffff)(addrTemp)
+              */
+            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.sdc1(src, addrTempRegister, address.offset);
+        }
+#endif
+    }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+#if WTF_MIPS_ISA(1)
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                swc1    src, address.offset(addrTemp)
+                swc1    src+1, (address.offset + 4)(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.swc1(src, addrTempRegister, address.offset);
+            m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                swc1    src, (address.offset & 0xffff)(at)
+                swc1    src+1, (address.offset & 0xffff + 4)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.swc1(src, addrTempRegister, address.offset);
+            m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+        }
+#else
+        if (address.offset >= -32768 && address.offset <= 32767
+            && !m_fixedWidth) {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                sdc1    src, address.offset(addrTemp)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.sdc1(src, addrTempRegister, address.offset);
+        } else {
+            /*
+                sll     addrTemp, address.index, address.scale
+                addu    addrTemp, addrTemp, address.base
+                lui     immTemp, (address.offset + 0x8000) >> 16
+                addu    addrTemp, addrTemp, immTemp
+                sdc1    src, (address.offset & 0xffff)(at)
+            */
+            m_assembler.sll(addrTempRegister, address.index, address.scale);
+            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+            m_assembler.sdc1(src, addrTempRegister, address.offset);
+        }
+#endif
+    }
+
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
+    {
+#if WTF_MIPS_ISA(1)
+        move(address, addrTempRegister);
+        m_assembler.swc1(src, addrTempRegister, 0);
+        m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+        move(address, addrTempRegister);
+        m_assembler.sdc1(src, addrTempRegister, 0);
+#endif
+    }
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        if (src != dest || m_fixedWidth)
+            m_assembler.movd(dest, src);
+    }
+
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        convertInt32ToDouble(MIPSRegisters::zero, reg);
+    }
+
+    void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
+    {
+        moveDouble(fr1, fpTempRegister);
+        moveDouble(fr2, fr1);
+        moveDouble(fpTempRegister, fr2);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.addd(dest, dest, src);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.addd(dest, op1, op2);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        m_assembler.addd(dest, dest, fpTempRegister);
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+        m_assembler.addd(dest, dest, fpTempRegister);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.subd(dest, dest, src);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.subd(dest, op1, op2);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        m_assembler.subd(dest, dest, fpTempRegister);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.muld(dest, dest, src);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        m_assembler.muld(dest, dest, fpTempRegister);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.muld(dest, op1, op2);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.divd(dest, dest, src);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        m_assembler.divd(dest, op1, op2);
+    }
+
+    void divDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        m_assembler.divd(dest, dest, fpTempRegister);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.negd(dest, src);
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.mtc1(src, fpTempRegister);
+        m_assembler.cvtdw(dest, fpTempRegister);
+    }
+
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        m_assembler.mtc1(dataTempRegister, fpTempRegister);
+        m_assembler.cvtdw(dest, fpTempRegister);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        load32(src.m_ptr, dataTempRegister);
+        m_assembler.mtc1(dataTempRegister, fpTempRegister);
+        m_assembler.cvtdw(dest, fpTempRegister);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.cvtds(dst, src);
+    }
+
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.cvtsd(dst, src);
+    }
+
+    void insertRelaxationWords()
+    {
+        /* We need four words for relaxation. */
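+        // Rationale (summary, not new behavior): the conditional branches emitted
+        // here encode only a 16-bit word offset, so these reserved nop slots give
+        // the link step room to rewrite a short branch into a longer jump sequence
+        // when the target turns out to be out of range.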
+        m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 3); // Jump over nops;
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.nop();
+    }
+
+    Jump branchTrue()
+    {
+        m_assembler.appendJump();
+        m_assembler.bc1t();
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Jump(m_assembler.label());
+    }
+
+    Jump branchFalse()
+    {
+        m_assembler.appendJump();
+        m_assembler.bc1f();
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Jump(m_assembler.label());
+    }
+
+    Jump branchEqual(RegisterID rs, RegisterID rt)
+    {
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.appendJump();
+        m_assembler.beq(rs, rt, 0);
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Jump(m_assembler.label());
+    }
+
+    Jump branchNotEqual(RegisterID rs, RegisterID rt)
+    {
+        m_assembler.nop();
+        m_assembler.nop();
+        m_assembler.appendJump();
+        m_assembler.bne(rs, rt, 0);
+        m_assembler.nop();
+        insertRelaxationWords();
+        return Jump(m_assembler.label());
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        if (cond == DoubleEqual) {
+            m_assembler.ceqd(left, right);
+            return branchTrue();
+        }
+        if (cond == DoubleNotEqual) {
+            m_assembler.cueqd(left, right);
+            return branchFalse(); // false
+        }
+        if (cond == DoubleGreaterThan) {
+            m_assembler.cngtd(left, right);
+            return branchFalse(); // false
+        }
+        if (cond == DoubleGreaterThanOrEqual) {
+            m_assembler.cnged(left, right);
+            return branchFalse(); // false
+        }
+        if (cond == DoubleLessThan) {
+            m_assembler.cltd(left, right);
+            return branchTrue();
+        }
+        if (cond == DoubleLessThanOrEqual) {
+            m_assembler.cled(left, right);
+            return branchTrue();
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            m_assembler.cueqd(left, right);
+            return branchTrue();
+        }
+        if (cond == DoubleNotEqualOrUnordered) {
+            m_assembler.ceqd(left, right);
+            return branchFalse(); // false
+        }
+        if (cond == DoubleGreaterThanOrUnordered) {
+            m_assembler.coled(left, right);
+            return branchFalse(); // false
+        }
+        if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+            m_assembler.coltd(left, right);
+            return branchFalse(); // false
+        }
+        if (cond == DoubleLessThanOrUnordered) {
+            m_assembler.cultd(left, right);
+            return branchTrue();
+        }
+        if (cond == DoubleLessThanOrEqualOrUnordered) {
+            m_assembler.culed(left, right);
+            return branchTrue();
+        }
+        ASSERT(0);
+
+        return Jump();
+    }
+
+    // Truncates 'src' to an integer and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, INT_MAX 0x7fffffff).
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
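+    //
+    // Typical use (illustrative sketch; 'masm' is an assumed MacroAssemblerMIPS,
+    // src/dest are registers chosen by the caller):
+    //
+    //     Jump failed = masm.branchTruncateDoubleToInt32(src, dest);
+    //     // ... fast path: dest holds the truncated integer ...
+    //     failed.link(&masm); // truncation may have clamped; take the slow path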
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        m_assembler.truncwd(fpTempRegister, src);
+        m_assembler.mfc1(dest, fpTempRegister);
+        return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff));
+    }
+
+    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        m_assembler.truncwd(fpTempRegister, src);
+        m_assembler.mfc1(dest, fpTempRegister);
+        return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff));
+    }
+
+    // Result is undefined if the value is outside of the integer range.
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.truncwd(fpTempRegister, src);
+        m_assembler.mfc1(dest, fpTempRegister);
+    }
+
+    // Result is undefined if src > 2^31
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.truncwd(fpTempRegister, src);
+        m_assembler.mfc1(dest, fpTempRegister);
+    }
+
+    // Converts 'src' to an integer and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
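+    //
+    // Sketch of a typical call site (names other than the parameters below are
+    // illustrative): gather the failure branches and bind them to one slow path:
+    //
+    //     JumpList failureCases;
+    //     masm.branchConvertDoubleToInt32(src, dest, failureCases, fpTemp);
+    //     // ... fast path: dest now holds the exact integer value ...
+    //     failureCases.link(&masm); // -0.0 or inexact conversion: slow path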
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
+    {
+        m_assembler.cvtwd(fpTempRegister, src);
+        m_assembler.mfc1(dest, fpTempRegister);
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+        if (negZeroCheck)
+            failureCases.append(branch32(Equal, dest, MIPSRegisters::zero));
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        convertInt32ToDouble(dest, fpTemp);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fpTemp, src));
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
+        return branchDouble(DoubleNotEqual, reg, scratch);
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
+        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+    }
+
+    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        RelationalCondition r;
+        if (cond == Equal)
+            r = NotEqual;
+        else if (cond == NotEqual)
+            r = Equal;
+        else if (cond == Above)
+            r = BelowOrEqual;
+        else if (cond == AboveOrEqual)
+            r = Below;
+        else if (cond == Below)
+            r = AboveOrEqual;
+        else if (cond == BelowOrEqual)
+            r = Above;
+        else if (cond == GreaterThan)
+            r = LessThanOrEqual;
+        else if (cond == GreaterThanOrEqual)
+            r = LessThan;
+        else if (cond == LessThan)
+            r = GreaterThanOrEqual;
+        else if (cond == LessThanOrEqual)
+            r = GreaterThan;
+        return r;
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+
+    void memoryFence()
+    {
+        m_assembler.sync();
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), dataTempRegister);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), immTempRegister);
+        abortWithReason(reason);
+    }
+
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())));
+    }
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        MIPSAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        MIPSAssembler::maxJumpReplacementSize();
+        return 0;
+    }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        return label.labelAtOffset(0);
+    }
+
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+    {
+        MIPSAssembler::revertJumpToMove(instructionStart.dataLocation(), immTempRegister, reinterpret_cast<intptr_t>(initialValue) & 0xffff);
+    }
+
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+private:
+    // If m_fixedWidth is true, we will generate a fixed number of instructions.
+    // Otherwise, we can emit any number of instructions.
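+    // For example, moveWithPatch() sets m_fixedWidth around its move so that a
+    // lui/ori pair of constant size is always emitted and can later be repatched
+    // in place.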
+    bool m_fixedWidth;
+
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Tail))
+            MIPSAssembler::linkJump(code, call.m_label, function.value());
+        else
+            MIPSAssembler::linkCall(code, call.m_label, function.value());
+    }
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
diff --git a/assembler/MacroAssemblerPrinter.cpp b/assembler/MacroAssemblerPrinter.cpp
new file mode 100644
index 0000000..c6c1757
--- /dev/null
+++ b/assembler/MacroAssemblerPrinter.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MacroAssemblerPrinter.h"
+
+#if ENABLE(MASM_PROBE)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+using CPUState = MacroAssembler::CPUState;
+using ProbeContext = MacroAssembler::ProbeContext;
+using RegisterID = MacroAssembler::RegisterID;
+using FPRegisterID = MacroAssembler::FPRegisterID;
+
+static void printIndent(int indentation)
+{
+    for (; indentation > 0; indentation--)
+        dataLog("    ");
+}
+
+#define INDENT printIndent(indentation)
+    
+void printCPU(CPUState& cpu, int indentation)
+{
+    INDENT, dataLog("cpu: {\n");
+    printCPURegisters(cpu, indentation + 1);
+    INDENT, dataLog("}\n");
+}
+
+void printCPURegisters(CPUState& cpu, int indentation)
+{
+#if USE(JSVALUE32_64)
+    #define INTPTR_HEX_VALUE_FORMAT "0x%08lx"
+#else
+    #define INTPTR_HEX_VALUE_FORMAT "0x%016lx"
+#endif
+
+    #define PRINT_GPREGISTER(_type, _regName) { \
+        intptr_t value = reinterpret_cast<intptr_t>(cpu._regName); \
+        INDENT, dataLogF("%6s: " INTPTR_HEX_VALUE_FORMAT "  %ld\n", #_regName, value, value); \
+    }
+    FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER)
+    FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER)
+    #undef PRINT_GPREGISTER
+    #undef INTPTR_HEX_VALUE_FORMAT
+    
+    #define PRINT_FPREGISTER(_type, _regName) { \
+        uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \
+        double* d = reinterpret_cast<double*>(&cpu._regName); \
+        INDENT, dataLogF("%6s: 0x%016llx  %.13g\n", #_regName, *u, *d); \
+    }
+    FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER)
+    #undef PRINT_FPREGISTER
+}
+
+static void printPC(CPUState& cpu)
+{
+    union {
+        void* voidPtr;
+        intptr_t intptrValue;
+    } u;
+#if CPU(X86) || CPU(X86_64)
+    u.voidPtr = cpu.eip;
+#elif CPU(ARM_TRADITIONAL) || CPU(ARM_THUMB2) || CPU(ARM64)
+    u.voidPtr = cpu.pc;
+#else
+#error "Unsupported CPU"
+#endif
+    dataLogF("pc:<%p %ld>", u.voidPtr, u.intptrValue);
+}
+
+void printRegister(CPUState& cpu, RegisterID regID)
+{
+    const char* name = CPUState::gprName(regID);
+    union {
+        void* voidPtr;
+        intptr_t intptrValue;
+    } u;
+    u.voidPtr = cpu.gpr(regID);
+    dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue);
+}
+
+void printRegister(CPUState& cpu, FPRegisterID regID)
+{
+    const char* name = CPUState::fprName(regID);
+    union {
+        double doubleValue;
+        uint64_t uint64Value;
+    } u;
+    u.doubleValue = cpu.fpr(regID);
+    dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue);
+}
+
+void printMemory(CPUState& cpu, const Memory& memory)
+{
+    uint8_t* ptr = nullptr;
+    switch (memory.addressType) {
+    case Memory::AddressType::Address: {
+        ptr = reinterpret_cast<uint8_t*>(cpu.gpr(memory.u.address.base));
+        ptr += memory.u.address.offset;
+        break;
+    }
+    case Memory::AddressType::AbsoluteAddress: {
+        ptr = reinterpret_cast<uint8_t*>(const_cast<void*>(memory.u.absoluteAddress.m_ptr));
+        break;
+    }
+    }
+
+    if (memory.dumpStyle == Memory::SingleWordDump) {
+        if (memory.numBytes == sizeof(int8_t)) {
+            auto p = reinterpret_cast<int8_t*>(ptr);
+            dataLogF("%p:<0x%02x %d>", p, *p, *p);
+            return;
+        }
+        if (memory.numBytes == sizeof(int16_t)) {
+            auto p = reinterpret_cast<int16_t*>(ptr);
+            dataLogF("%p:<0x%04x %d>", p, *p, *p);
+            return;
+        }
+        if (memory.numBytes == sizeof(int32_t)) {
+            auto p = reinterpret_cast<int32_t*>(ptr);
+            dataLogF("%p:<0x%08x %d>", p, *p, *p);
+            return;
+        }
+        if (memory.numBytes == sizeof(int64_t)) {
+            auto p = reinterpret_cast<int64_t*>(ptr);
+            dataLogF("%p:<0x%016llx %lld>", p, *p, *p);
+            return;
+        }
+        // Else, unknown word size. Fall thru and dump in the generic way.
+    }
+
+    // Generic dump: dump rows of 16 bytes in 4 byte groupings.
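+    // For example, a 20-byte region prints roughly as (address and contents
+    // illustrative):
+    //     0x12340000: 00010203 04050607 08090a0b 0c0d0e0f
+    //     0x12340010: 10111213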
+    size_t numBytes = memory.numBytes;
+    for (size_t i = 0; i < numBytes; i++) {
+        if (!(i % 16))
+            dataLogF("%p: ", &ptr[i]);
+        else if (!(i % 4))
+            dataLog(" ");
+
+        dataLogF("%02x", ptr[i]);
+
+        if (i % 16 == 15)
+            dataLog("\n");
+    }
+    if (numBytes % 16 < 15)
+        dataLog("\n");
+}
+
+void MacroAssemblerPrinter::printCallback(ProbeContext* context)
+{
+    typedef PrintArg Arg;
+    PrintArgsList& argsList = *reinterpret_cast<PrintArgsList*>(context->arg1);
+    for (size_t i = 0; i < argsList.size(); i++) {
+        auto& arg = argsList[i];
+        switch (arg.type) {
+        case Arg::Type::AllRegisters:
+            printCPU(context->cpu, 1);
+            break;
+        case Arg::Type::PCRegister:
+            printPC(context->cpu);
+            break;
+        case Arg::Type::RegisterID:
+            printRegister(context->cpu, arg.u.gpRegisterID);
+            break;
+        case Arg::Type::FPRegisterID:
+            printRegister(context->cpu, arg.u.fpRegisterID);
+            break;
+        case Arg::Type::Memory:
+            printMemory(context->cpu, arg.u.memory);
+            break;
+        case Arg::Type::ConstCharPtr:
+            dataLog(arg.u.constCharPtr);
+            break;
+        case Arg::Type::ConstVoidPtr:
+            dataLogF("%p", arg.u.constVoidPtr);
+            break;
+        case Arg::Type::IntptrValue:
+            dataLog(arg.u.intptrValue);
+            break;
+        case Arg::Type::UintptrValue:
+            dataLog(arg.u.uintptrValue);
+            break;
+        }
+    }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(MASM_PROBE)
diff --git a/assembler/MacroAssemblerPrinter.h b/assembler/MacroAssemblerPrinter.h
new file mode 100644
index 0000000..bbce7ee
--- /dev/null
+++ b/assembler/MacroAssemblerPrinter.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MASM_PROBE)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+// What is MacroAssembler::print()?
+// ===============================
+// MacroAssembler::print() makes it easy to add print logging from JIT
+// compiled code, and can be used to print many kinds of values at runtime,
+// e.g. the CPU register values being operated on by the compiled code.
+//
+// print() is built on top of MacroAssembler::probe(), and hence inserting
+// logging in JIT compiled code will not perturb register values. The only
+// register value that is perturbed is the PC (program counter), since there
+// is now more compiled code to execute in order to do the printing.
+//
+// How to use the MacroAssembler print()?
+// =====================================
+// 1. #include "MacroAssemblerPrinter.h" in the JIT file where you want to use print().
+//
+// 2. Add print() calls like these in your JIT code:
+//
+//      jit.print("Hello world\n"); // Emits code to print the string.
+//
+//      CodeBlock* cb = ...;
+//      jit.print(cb, "\n");        // Emits code to print the pointer value.
+//
+//      RegisterID regID = ...;
+//      jit.print(regID, "\n");     // Emits code to print the register value (not the id).
+//
+//      // Emits code to print all registers. Unlike other items, this prints
+//      // multiple lines as follows:
+//      //      cpu {
+//      //          eax: 0x123456789
+//      //          ebx: 0x000000abc
+//      //          ...
+//      //      }
+//      jit.print(AllRegisters());
+//
+//      jit.print(MemWord<uint8_t>(regID), "\n");   // Emits code to print a byte pointed to by the register.
+//      jit.print(MemWord<uint32_t>(regID), "\n");  // Emits code to print a 32-bit word pointed to by the register.
+//
+//      jit.print(MemWord<uint8_t>(Address(regID, 23)), "\n");     // Emits code to print a byte at the address.
+//      jit.print(MemWord<intptr_t>(AbsoluteAddress(&cb)), "\n");  // Emits code to print an intptr_t sized word at the address.
+//
+//      jit.print(Memory(reg, 100), "\n");              // Emits code to print 100 bytes at the address pointed to by the register.
+//      jit.print(Memory(Address(reg, 4), 100), "\n");  // Emits code to print 100 bytes at the address.
+//
+//      // Print multiple things at once. This incurs the probe overhead only once
+//      // to print all the items.
+//      jit.print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters());
+//
+//   The set of value types that can be printed is encapsulated in the PrintArg struct below.
+//
+//   Note: print() does not automatically insert a '\n' at the end of the line.
+//   If you want a '\n', you'll have to add it explicitly (as in the examples above).
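+//
+//   As an illustrative sketch only (emitLoggedAdd32 and the register arguments are
+//   hypothetical names for this example, not part of this patch), a JIT helper that
+//   logs its operands might look like:
+//
+//      void emitLoggedAdd32(MacroAssembler& jit, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dest)
+//      {
+//          jit.print("add32: src=", src, " dest=", dest, "\n"); // Log the input register values.
+//          jit.add32(src, dest);                                // The instruction being instrumented.
+//          jit.print("add32: result=", dest, "\n");             // Log the result register value.
+//      }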
+
+
+// These are marker types used only with MacroAssemblerPrinter::print().
+// See MacroAssemblerPrinter::print() below for details.
+struct AllRegisters { };
+struct PCRegister { };
+
+struct Memory {
+    using Address = MacroAssembler::Address;
+    using AbsoluteAddress = MacroAssembler::AbsoluteAddress;
+    using RegisterID = MacroAssembler::RegisterID;
+
+    enum class AddressType {
+        Address,
+        AbsoluteAddress,
+    };
+
+    enum DumpStyle {
+        SingleWordDump,
+        GenericDump,
+    };
+
+    Memory(RegisterID& reg, size_t bytes, DumpStyle style = GenericDump)
+        : addressType(AddressType::Address)
+        , dumpStyle(style)
+        , numBytes(bytes)
+    {
+        u.address = Address(reg, 0);
+    }
+
+    Memory(const Address& address, size_t bytes, DumpStyle style = GenericDump)
+        : addressType(AddressType::Address)
+        , dumpStyle(style)
+        , numBytes(bytes)
+    {
+        u.address = address;
+    }
+
+    Memory(const AbsoluteAddress& address, size_t bytes, DumpStyle style = GenericDump)
+        : addressType(AddressType::AbsoluteAddress)
+        , dumpStyle(style)
+        , numBytes(bytes)
+    {
+        u.absoluteAddress = address;
+    }
+
+    AddressType addressType;
+    DumpStyle dumpStyle;
+    size_t numBytes;
+    union UnionedAddress {
+        UnionedAddress() { }
+
+        Address address;
+        AbsoluteAddress absoluteAddress;
+    } u;
+};
+
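+// MemWord<IntType> is a convenience wrapper over Memory that dumps exactly
+// sizeof(IntType) bytes as a single word (see the SingleWordDump handling in
+// printMemory()).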
+template <typename IntType>
+struct MemWord : public Memory {
+    MemWord(RegisterID& reg)
+        : Memory(reg, sizeof(IntType), Memory::SingleWordDump)
+    { }
+
+    MemWord(const Address& address)
+        : Memory(address, sizeof(IntType), Memory::SingleWordDump)
+    { }
+
+    MemWord(const AbsoluteAddress& address)
+        : Memory(address, sizeof(IntType), Memory::SingleWordDump)
+    { }
+};
+
+
+class MacroAssemblerPrinter {
+    using CPUState = MacroAssembler::CPUState;
+    using ProbeContext = MacroAssembler::ProbeContext;
+    using RegisterID = MacroAssembler::RegisterID;
+    using FPRegisterID = MacroAssembler::FPRegisterID;
+    
+public:
+    template <typename... Arguments>
+    static void print(MacroAssembler* masm, Arguments... args)
+    {
+        auto argsList = std::make_unique<PrintArgsList>();
+        appendPrintArg(argsList.get(), args...);
+        masm->probe(printCallback, argsList.release(), 0);
+    }
+    
+private:
+    struct PrintArg {
+
+        enum class Type {
+            AllRegisters,
+            PCRegister,
+            RegisterID,
+            FPRegisterID,
+            Memory,
+            ConstCharPtr,
+            ConstVoidPtr,
+            IntptrValue,
+            UintptrValue,
+        };
+        
+        PrintArg(AllRegisters&)
+            : type(Type::AllRegisters)
+        {
+        }
+        
+        PrintArg(PCRegister&)
+            : type(Type::PCRegister)
+        {
+        }
+        
+        PrintArg(RegisterID regID)
+            : type(Type::RegisterID)
+        {
+            u.gpRegisterID = regID;
+        }
+        
+        PrintArg(FPRegisterID regID)
+            : type(Type::FPRegisterID)
+        {
+            u.fpRegisterID = regID;
+        }
+
+        PrintArg(const Memory& memory)
+            : type(Type::Memory)
+        {
+            u.memory = memory;
+        }
+
+        PrintArg(const char* ptr)
+            : type(Type::ConstCharPtr)
+        {
+            u.constCharPtr = ptr;
+        }
+        
+        PrintArg(const void* ptr)
+            : type(Type::ConstVoidPtr)
+        {
+            u.constVoidPtr = ptr;
+        }
+        
+        PrintArg(int value)
+            : type(Type::IntptrValue)
+        {
+            u.intptrValue = value;
+        }
+        
+        PrintArg(unsigned value)
+            : type(Type::UintptrValue)
+        {
+            u.uintptrValue = value;
+        }
+        
+        PrintArg(intptr_t value)
+            : type(Type::IntptrValue)
+        {
+            u.intptrValue = value;
+        }
+        
+        PrintArg(uintptr_t value)
+            : type(Type::UintptrValue)
+        {
+            u.uintptrValue = value;
+        }
+        
+        Type type;
+        union Value {
+            Value() { }
+
+            RegisterID gpRegisterID;
+            FPRegisterID fpRegisterID;
+            Memory memory;
+            const char* constCharPtr;
+            const void* constVoidPtr;
+            intptr_t intptrValue;
+            uintptr_t uintptrValue;
+        } u;
+    };
+
+    typedef Vector<PrintArg> PrintArgsList;
+    
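+    // Recursively peel the first argument off the pack, wrap it in a PrintArg,
+    // and append it to the list; the overload below that takes no print
+    // arguments terminates the recursion.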
+    template <typename FirstArg, typename... Arguments>
+    static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs)
+    {
+        argsList->append(PrintArg(firstArg));
+        appendPrintArg(argsList, otherArgs...);
+    }
+    
+    static void appendPrintArg(PrintArgsList*) { }
+
+private:
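+    // Invoked from inside the probe at runtime; ProbeContext::arg1 carries the
+    // heap-allocated PrintArgsList handed off by print() above.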
+    static void printCallback(ProbeContext*);
+};
+
+template <typename... Arguments>
+void MacroAssembler::print(Arguments... args)
+{
+    MacroAssemblerPrinter::print(this, args...);
+}
+
+
+// These printers will print a block of information. That block may be
+// indented with the specified indentation.
+void printCPU(MacroAssembler::CPUState&, int indentation = 0);
+void printCPURegisters(MacroAssembler::CPUState&, int indentation = 0);
+
+// These printers will print the specified information in line in the
+// print stream. Hence, no indentation will be applied.
+void printRegister(MacroAssembler::CPUState&, MacroAssembler::RegisterID);
+void printRegister(MacroAssembler::CPUState&, MacroAssembler::FPRegisterID);
+void printMemory(MacroAssembler::CPUState&, const Memory&);
+
+} // namespace JSC
+
+#endif // ENABLE(MASM_PROBE)
diff --git a/assembler/MacroAssemblerSH4.h b/assembler/MacroAssemblerSH4.h
new file mode 100644
index 0000000..c492a40
--- /dev/null
+++ b/assembler/MacroAssemblerSH4.h
@@ -0,0 +1,2671 @@
+/*
+ * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008, 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "SH4Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler, MacroAssemblerSH4> {
+public:
+    typedef SH4Assembler::FPRegisterID FPRegisterID;
+
+    static const Scale ScalePtr = TimesFour;
+    static const FPRegisterID fscratch = SH4Registers::dr10;
+    static const RegisterID stackPointerRegister = SH4Registers::sp;
+    static const RegisterID framePointerRegister = SH4Registers::fp;
+    static const RegisterID linkRegister = SH4Registers::pr;
+    static const RegisterID scratchReg3 = SH4Registers::r13;
+
+    static const int MaximumCompactPtrAlignedAddressOffset = 60;
+
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset) && (!(value & 3));
+    }
+
+    enum RelationalCondition {
+        Equal = SH4Assembler::EQ,
+        NotEqual = SH4Assembler::NE,
+        Above = SH4Assembler::HI,
+        AboveOrEqual = SH4Assembler::HS,
+        Below = SH4Assembler::LI,
+        BelowOrEqual = SH4Assembler::LS,
+        GreaterThan = SH4Assembler::GT,
+        GreaterThanOrEqual = SH4Assembler::GE,
+        LessThan = SH4Assembler::LT,
+        LessThanOrEqual = SH4Assembler::LE
+    };
+
+    enum ResultCondition {
+        Overflow = SH4Assembler::OF,
+        Signed = SH4Assembler::SI,
+        PositiveOrZero = SH4Assembler::NS,
+        Zero = SH4Assembler::EQ,
+        NonZero = SH4Assembler::NE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = SH4Assembler::EQ,
+        DoubleNotEqual = SH4Assembler::NE,
+        DoubleGreaterThan = SH4Assembler::GT,
+        DoubleGreaterThanOrEqual = SH4Assembler::GE,
+        DoubleLessThan = SH4Assembler::LT,
+        DoubleLessThanOrEqual = SH4Assembler::LE,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = SH4Assembler::EQU,
+        DoubleNotEqualOrUnordered = SH4Assembler::NEU,
+        DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
+        DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
+        DoubleLessThanOrUnordered = SH4Assembler::LTU,
+        DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU,
+    };
+
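+    // The SH4 backend synthesizes many operations from multiple instructions and
+    // routes temporaries through assembler-managed scratch registers; every
+    // claimScratch() must be paired with a releaseScratch() so the scratch pool
+    // is not exhausted across emitted sequences.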
+    RegisterID claimScratch()
+    {
+        return m_assembler.claimScratch();
+    }
+
+    void releaseScratch(RegisterID reg)
+    {
+        m_assembler.releaseScratch(reg);
+    }
+
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+            return NotEqual;
+        case NotEqual:
+            return Equal;
+        case Above:
+            return BelowOrEqual;
+        case AboveOrEqual:
+            return Below;
+        case Below:
+            return AboveOrEqual;
+        case BelowOrEqual:
+            return Above;
+        case GreaterThan:
+            return LessThanOrEqual;
+        case GreaterThanOrEqual:
+            return LessThan;
+        case LessThan:
+            return GreaterThanOrEqual;
+        case LessThanOrEqual:
+            return GreaterThan;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    // Integer arithmetic operations
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.addlRegReg(src, dest);
+    }
+
+    void add32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            add32(src2, dest);
+        else {
+            move(src2, dest);
+            add32(src1, dest);
+        }
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            return;
+
+        if (m_assembler.isImmediate(imm.m_value)) {
+            m_assembler.addlImm8r(imm.m_value, dest);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        m_assembler.addlRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        add32(imm, dest);
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        if (!imm.m_value)
+            return;
+
+        RegisterID scr = claimScratch();
+        load32(address, scr);
+        add32(imm, scr);
+        store32(scr, address);
+        releaseScratch(scr);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        load32(src, scr);
+        m_assembler.addlRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void add32(AbsoluteAddress src, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        load32(src.m_ptr, scr);
+        m_assembler.addlRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.andlRegReg(src, dest);
+    }
+
+    void and32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            and32(src2, dest);
+        else {
+            move(src2, dest);
+            and32(src1, dest);
+        }
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        load32(src, scr);
+        and32(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value) {
+            m_assembler.movImm8(0, dest);
+            return;
+        }
+
+        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+            m_assembler.andlImm8r(imm.m_value, dest);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        m_assembler.andlRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (src != dest) {
+            move(imm, dest);
+            and32(src, dest);
+            return;
+        }
+
+        and32(imm, dest);
+    }
+
+    void lshift32(RegisterID shiftamount, RegisterID dest)
+    {
+        RegisterID shiftTmp = claimScratch();
+        m_assembler.loadConstant(0x1f, shiftTmp);
+        m_assembler.andlRegReg(shiftamount, shiftTmp);
+        m_assembler.shldRegReg(dest, shiftTmp);
+        releaseScratch(shiftTmp);
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        move(src, dest);
+        lshift32(shiftAmount, dest);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        int immMasked = imm.m_value & 0x1f;
+        if (!immMasked)
+            return;
+
+        if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
+            m_assembler.shllImm8r(immMasked, dest);
+            return;
+        }
+
+        RegisterID shiftTmp = claimScratch();
+        m_assembler.loadConstant(immMasked, shiftTmp);
+        m_assembler.shldRegReg(dest, shiftTmp);
+        releaseScratch(shiftTmp);
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+    {
+        move(src, dest);
+        lshift32(shiftamount, dest);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        mul32(src, dest, dest);    
+    }
+
+    void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        m_assembler.imullRegReg(src1, src2);
+        m_assembler.stsmacl(dest);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (src == dest) {
+            RegisterID immval = claimScratch();
+            move(imm, immval);
+            mul32(immval, dest);
+            releaseScratch(immval);
+        } else {
+            move(imm, dest);
+            mul32(src, dest);
+        }
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orlRegReg(src, dest);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+            m_assembler.orlImm8r(imm.m_value, dest);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        m_assembler.orlRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(op1, dest);
+        else if (op1 == dest)
+            or32(op2, dest);
+        else {
+            move(op2, dest);
+            or32(op1, dest);
+        }
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (src != dest) {
+            move(imm, dest);
+            or32(src, dest);
+            return;
+        }
+
+        or32(imm, dest);
+    }
+
+    void or32(RegisterID src, AbsoluteAddress address)
+    {
+        RegisterID destptr = claimScratch();
+        move(TrustedImmPtr(address.m_ptr), destptr);
+        RegisterID destval = claimScratch();
+        m_assembler.movlMemReg(destptr, destval);
+        m_assembler.orlRegReg(src, destval);
+        m_assembler.movlRegMem(destval, destptr);
+        releaseScratch(destval);
+        releaseScratch(destptr);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (src != dest) {
+            move(imm, dest);
+            xor32(src, dest);
+            return;
+        }
+
+        xor32(imm, dest);
+    }
+
+    void rshift32(RegisterID shiftamount, RegisterID dest)
+    {
+        RegisterID shiftTmp = claimScratch();
+        m_assembler.loadConstant(0x1f, shiftTmp);
+        m_assembler.andlRegReg(shiftamount, shiftTmp);
+        m_assembler.neg(shiftTmp, shiftTmp);
+        m_assembler.shadRegReg(dest, shiftTmp);
+        releaseScratch(shiftTmp);
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        move(src, dest);
+        rshift32(shiftAmount, dest);
+    }
+
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        int immMasked = imm.m_value & 0x1f;
+        if (!immMasked)
+            return;
+
+        if (immMasked == 1) {
+            m_assembler.sharImm8r(immMasked, dest);
+            return;
+        }
+
+        RegisterID shiftTmp = claimScratch();
+        m_assembler.loadConstant(-immMasked, shiftTmp);
+        m_assembler.shadRegReg(dest, shiftTmp);
+        releaseScratch(shiftTmp);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move(src, dest);
+        rshift32(imm, dest);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sublRegReg(src, dest);
+    }
+
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        if (dest == right) {
+            neg32(dest);
+            add32(left, dest);
+            return;
+        }
+        move(left, dest);
+        sub32(right, dest);
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        if (!imm.m_value)
+            return;
+
+        RegisterID result = claimScratch();
+        RegisterID scratchReg = claimScratch();
+
+        move(TrustedImmPtr(address.m_ptr), scratchReg);
+        m_assembler.movlMemReg(scratchReg, result);
+
+        if (m_assembler.isImmediate(-imm.m_value))
+            m_assembler.addlImm8r(-imm.m_value, result);
+        else {
+            m_assembler.loadConstant(imm.m_value, scratchReg3);
+            m_assembler.sublRegReg(scratchReg3, result);
+        }
+
+        store32(result, scratchReg);
+        releaseScratch(result);
+        releaseScratch(scratchReg);
+    }
+
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        add32(TrustedImm32(-imm.m_value), address);
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        if (!imm.m_value)
+            return;
+
+        RegisterID result = claimScratch();
+        RegisterID scratchReg = claimScratch();
+
+        move(TrustedImmPtr(address.m_ptr), scratchReg);
+        m_assembler.movlMemReg(scratchReg, result);
+
+        if (m_assembler.isImmediate(imm.m_value))
+            m_assembler.addlImm8r(imm.m_value, result);
+        else {
+            m_assembler.loadConstant(imm.m_value, scratchReg3);
+            m_assembler.addlRegReg(scratchReg3, result);
+        }
+
+        store32(result, scratchReg);
+        releaseScratch(result);
+        releaseScratch(scratchReg);
+    }
+
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        RegisterID scr1 = claimScratch();
+        RegisterID scr2 = claimScratch();
+
+        // Add 32-bit LSB first.
+        move(TrustedImmPtr(address.m_ptr), scratchReg3);
+        m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = 32-bit LSB of int64 @ address
+        m_assembler.loadConstant(imm.m_value, scr2);
+        m_assembler.clrt();
+        m_assembler.addclRegReg(scr1, scr2);
+        m_assembler.movlRegMem(scr2, scratchReg3); // Update address with 32-bit LSB result.
+
+        // Then add 32-bit MSB.
+        m_assembler.addlImm8r(4, scratchReg3);
+        m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = 32-bit MSB of int64 @ address
+        m_assembler.movt(scr2);
+        if (imm.m_value < 0)
+            m_assembler.addlImm8r(-1, scr2); // Sign extend imm value if needed.
+        m_assembler.addvlRegReg(scr2, scr1);
+        m_assembler.movlRegMem(scr1, scratchReg3); // Update (address + 4) with 32-bit MSB result.
+
+        releaseScratch(scr2);
+        releaseScratch(scr1);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            return;
+
+        if (m_assembler.isImmediate(-imm.m_value)) {
+            m_assembler.addlImm8r(-imm.m_value, dest);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        m_assembler.sublRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        load32(src, scr);
+        m_assembler.sublRegReg(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xorlRegReg(src, dest);
+    }
+
+    void xor32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            xor32(src2, dest);
+        else {
+            move(src2, dest);
+            xor32(src1, dest);
+        }
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID srcDest)
+    {
+        if (imm.m_value == -1) {
+            m_assembler.notlReg(srcDest, srcDest);
+            return;
+        }
+
+        if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
+            RegisterID scr = claimScratch();
+            m_assembler.loadConstant(imm.m_value, scr);
+            m_assembler.xorlRegReg(scr, srcDest);
+            releaseScratch(scr);
+            return;
+        }
+
+        m_assembler.xorlImm8r(imm.m_value, srcDest);
+    }
+
+    void compare32(int imm, RegisterID dst, RelationalCondition cond)
+    {
+        if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
+            m_assembler.cmpEqImmR0(imm, dst);
+            return;
+        }
+
+        if (((cond == Equal) || (cond == NotEqual)) && !imm) {
+            m_assembler.testlRegReg(dst, dst);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm, scr);
+        m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
+        releaseScratch(scr);
+    }
+
+    void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
+    {
+        RegisterID scr = claimScratch();
+        if (!offset) {
+            m_assembler.movlMemReg(base, scr);
+            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+            releaseScratch(scr);
+            return;
+        }
+
+        if ((offset < 0) || (offset >= 64)) {
+            m_assembler.loadConstant(offset, scr);
+            m_assembler.addlRegReg(base, scr);
+            m_assembler.movlMemReg(scr, scr);
+            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+            releaseScratch(scr);
+            return;
+        }
+
+        m_assembler.movlMemReg(offset >> 2, base, scr);
+        m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+        releaseScratch(scr);
+    }
+
+    void testImm(int imm, int offset, RegisterID base)
+    {
+        RegisterID scr = claimScratch();
+        load32(base, offset, scr);
+
+        RegisterID scr1 = claimScratch();
+        move(TrustedImm32(imm), scr1);
+
+        m_assembler.testlRegReg(scr, scr1);
+        releaseScratch(scr);
+        releaseScratch(scr1);
+    }
+
+    void testlImm(int imm, RegisterID dst)
+    {
+        if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) {
+            m_assembler.testlImm8r(imm, dst);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm, scr);
+        m_assembler.testlRegReg(scr, dst);
+        releaseScratch(scr);
+    }
+
+    void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
+    {
+        if (!offset) {
+            RegisterID scr = claimScratch();
+            m_assembler.movlMemReg(base, scr);
+            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+            releaseScratch(scr);
+            return;
+        }
+
+        if ((offset < 0) || (offset >= 64)) {
+            RegisterID scr = claimScratch();
+            m_assembler.loadConstant(offset, scr);
+            m_assembler.addlRegReg(base, scr);
+            m_assembler.movlMemReg(scr, scr);
+            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+            releaseScratch(scr);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.movlMemReg(offset >> 2, base, scr);
+        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+        releaseScratch(scr);
+    }
+
+    void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
+    {
+        RegisterID scr = claimScratch();
+        load32(base, offset, scr);
+
+        RegisterID scr1 = claimScratch();
+        move(TrustedImm32(imm), scr1);
+
+        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+
+        releaseScratch(scr1);
+        releaseScratch(scr);
+    }
+
+    // Memory access operation
+
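+    // Computes base + (index << scale) + offset + extraoffset into dest, taking
+    // care not to clobber the base register when it is also the destination.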
+    ALWAYS_INLINE void loadEffectiveAddress(BaseIndex address, RegisterID dest, int extraoffset = 0)
+    {
+        if (dest == address.base) {
+            RegisterID scaledIndex = claimScratch();
+            move(address.index, scaledIndex);
+            lshift32(TrustedImm32(address.scale), scaledIndex);
+            add32(scaledIndex, dest);
+            releaseScratch(scaledIndex);
+        } else {
+            move(address.index, dest);
+            lshift32(TrustedImm32(address.scale), dest);
+            add32(address.base, dest);
+        }
+
+        add32(TrustedImm32(address.offset + extraoffset), dest);
+    }
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        load32(address.base, address.offset, dest);
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        load8(address.base, address.offset, dest);
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(address.base, scr);
+        load8(scr, address.offset, dest);
+        releaseScratch(scr);
+    }
+
+    void load8(AbsoluteAddress address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address.m_ptr), dest);
+        m_assembler.movbMemReg(dest, dest);
+        m_assembler.extub(dest, dest);
+    }
+
+    void load8(const void* address, RegisterID dest)
+    {
+        load8(AbsoluteAddress(address), dest);
+    }
+
+    void load8PostInc(RegisterID base, RegisterID dest)
+    {
+        m_assembler.movbMemRegIn(base, dest);
+        m_assembler.extub(dest, dest);
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(address.base, scr);
+        load8SignedExtendTo32(scr, address.offset, dest);
+        releaseScratch(scr);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(address.base, scr);
+        load32(scr, address.offset, dest);
+        releaseScratch(scr);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address), dest);
+        m_assembler.movlMemReg(dest, dest);
+    }
+
+    void load32(RegisterID base, int offset, RegisterID dest)
+    {
+        if (!offset) {
+            m_assembler.movlMemReg(base, dest);
+            return;
+        }
+
+        if ((offset >= 0) && (offset < 64)) {
+            m_assembler.movlMemReg(offset >> 2, base, dest);
+            return;
+        }
+
+        RegisterID scr = (dest == base) ? claimScratch() : dest;
+
+        m_assembler.loadConstant(offset, scr);
+        if (base == SH4Registers::r0)
+            m_assembler.movlR0mr(scr, dest);
+        else {
+            m_assembler.addlRegReg(base, scr);
+            m_assembler.movlMemReg(scr, dest);
+        }
+
+        if (dest == base)
+            releaseScratch(scr);
+    }
+
+    void load8SignedExtendTo32(RegisterID base, int offset, RegisterID dest)
+    {
+        if (!offset) {
+            m_assembler.movbMemReg(base, dest);
+            return;
+        }
+
+        if ((offset > 0) && (offset <= 15) && (dest == SH4Registers::r0)) {
+            m_assembler.movbMemReg(offset, base, dest);
+            return;
+        }
+
+        RegisterID scr = (dest == base) ? claimScratch() : dest;
+
+        m_assembler.loadConstant(offset, scr);
+        if (base == SH4Registers::r0)
+            m_assembler.movbR0mr(scr, dest);
+        else {
+            m_assembler.addlRegReg(base, scr);
+            m_assembler.movbMemReg(scr, dest);
+        }
+
+        if (dest == base)
+            releaseScratch(scr);
+    }
+
+    void load8(RegisterID base, int offset, RegisterID dest)
+    {
+        load8SignedExtendTo32(base, offset, dest);
+        m_assembler.extub(dest, dest);
+    }
+
+    void load32(RegisterID src, RegisterID dst)
+    {
+        m_assembler.movlMemReg(src, dst);
+    }
+
+    void load16(ImplicitAddress address, RegisterID dest)
+    {
+        if (!address.offset) {
+            m_assembler.movwMemReg(address.base, dest);
+            m_assembler.extuw(dest, dest);
+            return;
+        }
+
+        if ((address.offset > 0) && (address.offset <= 30) && (dest == SH4Registers::r0)) {
+            m_assembler.movwMemReg(address.offset >> 1, address.base, dest);
+            m_assembler.extuw(dest, dest);
+            return;
+        }
+
+        RegisterID scr = (dest == address.base) ? claimScratch() : dest;
+
+        m_assembler.loadConstant(address.offset, scr);
+        if (address.base == SH4Registers::r0)
+            m_assembler.movwR0mr(scr, dest);
+        else {
+            m_assembler.addlRegReg(address.base, scr);
+            m_assembler.movwMemReg(scr, dest);
+        }
+        m_assembler.extuw(dest, dest);
+
+        if (dest == address.base)
+            releaseScratch(scr);
+    }
+
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+
+        loadEffectiveAddress(address, scr);
+
+        RegisterID scr1 = claimScratch();
+        load8PostInc(scr, scr1);
+        load8(scr, dest);
+        m_assembler.shllImm8r(8, dest);
+        or32(scr1, dest);
+
+        releaseScratch(scr);
+        releaseScratch(scr1);
+    }
+
+    void load16(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movwMemReg(src, dest);
+        m_assembler.extuw(dest, dest);
+    }
+
+    void load16SignedExtendTo32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movwMemReg(src, dest);
+    }
+
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        load16SignedExtendTo32(address, dest);
+        m_assembler.extuw(dest, dest);
+    }
+
+    void load16PostInc(RegisterID base, RegisterID dest)
+    {
+        m_assembler.movwMemRegIn(base, dest);
+        m_assembler.extuw(dest, dest);
+    }
+
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(TrustedImm32(address.offset), scr);
+
+        if (address.base == SH4Registers::r0)
+            m_assembler.movwR0mr(scr, dest);
+        else {
+            add32(address.base, scr);
+            load16SignedExtendTo32(scr, dest);
+        }
+
+        releaseScratch(scr);
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+        RegisterID scr = claimScratch();
+
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(TrustedImm32(address.offset), scr);
+
+        if (address.base == SH4Registers::r0)
+            m_assembler.movbRegMemr0(src, scr);
+        else {
+            add32(address.base, scr);
+            m_assembler.movbRegMem(src, scr);
+        }
+
+        releaseScratch(scr);
+    }
+
+    void store8(RegisterID src, void* address)
+    {
+        RegisterID destptr = claimScratch();
+        move(TrustedImmPtr(address), destptr);
+        m_assembler.movbRegMem(src, destptr);
+        releaseScratch(destptr);
+    }
+
+    void store8(TrustedImm32 imm, void* address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        RegisterID dstptr = claimScratch();
+        move(TrustedImmPtr(address), dstptr);
+        RegisterID srcval = claimScratch();
+        move(imm8, srcval);
+        m_assembler.movbRegMem(srcval, dstptr);
+        releaseScratch(dstptr);
+        releaseScratch(srcval);
+    }
+
+    void store8(TrustedImm32 imm, Address address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        RegisterID dstptr = claimScratch();
+        move(address.base, dstptr);
+        add32(TrustedImm32(address.offset), dstptr);
+        RegisterID srcval = claimScratch();
+        move(imm8, srcval);
+        m_assembler.movbRegMem(srcval, dstptr);
+        releaseScratch(dstptr);
+        releaseScratch(srcval);
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+        RegisterID scr = claimScratch();
+
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(TrustedImm32(address.offset), scr);
+
+        if (address.base == SH4Registers::r0)
+            m_assembler.movwRegMemr0(src, scr);
+        else {
+            add32(address.base, scr);
+            m_assembler.movwRegMem(src, scr);
+        }
+
+        releaseScratch(scr);
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        if (!address.offset) {
+            m_assembler.movlRegMem(src, address.base);
+            return;
+        }
+
+        if ((address.offset >= 0) && (address.offset < 64)) {
+            m_assembler.movlRegMem(src, address.offset >> 2, address.base);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(address.offset, scr);
+        if (address.base == SH4Registers::r0)
+            m_assembler.movlRegMemr0(src, scr);
+        else {
+            m_assembler.addlRegReg(address.base, scr);
+            m_assembler.movlRegMem(src, scr);
+        }
+        releaseScratch(scr);
+    }
+
+    void store32(RegisterID src, RegisterID dst)
+    {
+        m_assembler.movlRegMem(src, dst);
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        store32(scr, address);
+        releaseScratch(scr);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        RegisterID scr = claimScratch();
+
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(address.base, scr);
+        store32(src, Address(scr, address.offset));
+
+        releaseScratch(scr);
+    }
+
+    void store32(TrustedImm32 imm, void* address)
+    {
+        RegisterID scr = claimScratch();
+        RegisterID scr1 = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        move(TrustedImmPtr(address), scr1);
+        m_assembler.movlRegMem(scr, scr1);
+        releaseScratch(scr);
+        releaseScratch(scr1);
+    }
+
+    void store32(RegisterID src, void* address)
+    {
+        RegisterID scr = claimScratch();
+        move(TrustedImmPtr(address), scr);
+        m_assembler.movlRegMem(src, scr);
+        releaseScratch(scr);
+    }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        RegisterID destptr = claimScratch();
+
+        loadEffectiveAddress(address, destptr);
+
+        RegisterID srcval = claimScratch();
+        move(imm, srcval);
+        m_assembler.movlRegMem(srcval, destptr);
+        releaseScratch(srcval);
+        releaseScratch(destptr);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        DataLabel32 label(this);
+        m_assembler.loadConstantUnReusable(address.offset, scr);
+        m_assembler.addlRegReg(address.base, scr);
+        m_assembler.movlMemReg(scr, dest);
+        releaseScratch(scr);
+        return label;
+    }
+    
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        RegisterID scr = claimScratch();
+        DataLabel32 label(this);
+        m_assembler.loadConstantUnReusable(address.offset, scr);
+        m_assembler.addlRegReg(address.base, scr);
+        m_assembler.movlRegMem(src, scr);
+        releaseScratch(scr);
+        return label;
+    }
+
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabelCompact dataLabel(this);
+        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+        m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
+        return dataLabel;
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result(this);
+
+        RegisterID scr = claimScratch();
+        m_assembler.movImm8(address.offset, scr);
+        m_assembler.addlRegReg(address.base, scr);
+        m_assembler.movlMemReg(scr, dest);
+        releaseScratch(scr);
+
+        return result;
+    }
+
+    // Floating-point operations
+
+    static bool supportsFloatingPoint() { return true; }
+    static bool supportsFloatingPointTruncate() { return true; }
+    static bool supportsFloatingPointSqrt() { return true; }
+    static bool supportsFloatingPointAbs() { return true; }
+    static bool supportsFloatingPointRounding() { return false; }
+
+    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.fldsfpul((FPRegisterID)(src + 1));
+        m_assembler.stsfpulReg(dest1);
+        m_assembler.fldsfpul(src);
+        m_assembler.stsfpulReg(dest2);
+    }
+
+    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
+    {
+        m_assembler.ldsrmfpul(src1);
+        m_assembler.fstsfpul((FPRegisterID)(dest + 1));
+        m_assembler.ldsrmfpul(src2);
+        m_assembler.fstsfpul(dest);
+    }
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        if (src != dest) {
+            m_assembler.fmovsRegReg((FPRegisterID)(src + 1), (FPRegisterID)(dest + 1));
+            m_assembler.fmovsRegReg(src, dest);
+        }
+    }
+
+    void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
+    {
+        if (fr1 != fr2) {
+            m_assembler.fldsfpul((FPRegisterID)(fr1 + 1));
+            m_assembler.fmovsRegReg((FPRegisterID)(fr2 + 1), (FPRegisterID)(fr1 + 1));
+            m_assembler.fstsfpul((FPRegisterID)(fr2 + 1));
+            m_assembler.fldsfpul(fr1);
+            m_assembler.fmovsRegReg(fr2, fr1);
+            m_assembler.fstsfpul(fr2);
+        }
+    }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+
+        loadEffectiveAddress(address, scr);
+
+        m_assembler.fmovsReadrm(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+
+        loadEffectiveAddress(address, scr);
+
+        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+        m_assembler.fmovsReadrm(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+
+        m_assembler.loadConstant(address.offset, scr);
+        if (address.base == SH4Registers::r0) {
+            m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
+            m_assembler.addlImm8r(4, scr);
+            m_assembler.fmovsReadr0r(scr, dest);
+            releaseScratch(scr);
+            return;
+        }
+
+        m_assembler.addlRegReg(address.base, scr);
+        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+        m_assembler.fmovsReadrm(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        move(address, scr);
+        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+        m_assembler.fmovsReadrm(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        RegisterID scr = claimScratch();
+        loadEffectiveAddress(address, scr);
+        m_assembler.fmovsWriterm(src, scr);
+        releaseScratch(scr);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(address.offset + 8, scr);
+        m_assembler.addlRegReg(address.base, scr);
+        m_assembler.fmovsWriterndec(src, scr);
+        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
+        releaseScratch(scr);
+    }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        RegisterID scr = claimScratch();
+
+        loadEffectiveAddress(address, scr, 8);
+
+        m_assembler.fmovsWriterndec(src, scr);
+        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
+
+        releaseScratch(scr);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (op1 == dest)
+            addDouble(op2, dest);
+        else {
+            moveDouble(op2, dest);
+            addDouble(op1, dest);
+        }
+    }
+
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
+    {
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address.m_value)) + 8, scr);
+        m_assembler.fmovsWriterndec(src, scr);
+        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
+        releaseScratch(scr);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.daddRegReg(src, dest);
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        loadDouble(TrustedImmPtr(address.m_ptr), fscratch);
+        addDouble(fscratch, dest);
+    }
+
+    void addDouble(Address address, FPRegisterID dest)
+    {
+        loadDouble(address, fscratch);
+        addDouble(fscratch, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.dsubRegReg(src, dest);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (op2 == dest) {
+            moveDouble(op1, fscratch);
+            subDouble(op2, fscratch);
+            moveDouble(fscratch, dest);
+        } else {
+            moveDouble(op1, dest);
+            subDouble(op2, dest);
+        }
+    }
+
+    void subDouble(Address address, FPRegisterID dest)
+    {
+        loadDouble(address, fscratch);
+        subDouble(fscratch, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.dmulRegReg(src, dest);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (op1 == dest)
+            mulDouble(op2, dest);
+        else {
+            moveDouble(op2, dest);
+            mulDouble(op1, dest);
+        }
+    }
+
+    void mulDouble(Address address, FPRegisterID dest)
+    {
+        loadDouble(address, fscratch);
+        mulDouble(fscratch, dest);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.ddivRegReg(src, dest);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (op2 == dest) {
+            moveDouble(op1, fscratch);
+            divDouble(op2, fscratch);
+            moveDouble(fscratch, dest);
+        } else {
+            moveDouble(op1, dest);
+            divDouble(op2, dest);
+        }
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        moveDouble(src, dest);
+        m_assembler.dneg(dest);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.fldsfpul(src);
+        m_assembler.dcnvsd(dst);
+    }
+
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.dcnvds(src);
+        m_assembler.fstsfpul(dst);
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.ldsrmfpul(src);
+        m_assembler.floatfpulDreg(dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        load32(src.m_ptr, scr);
+        convertInt32ToDouble(scr, dest);
+        releaseScratch(scr);
+    }
+
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        load32(src, scr);
+        convertInt32ToDouble(scr, dest);
+        releaseScratch(scr);
+    }
+
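+    // Loads a 32-bit value that may not be 4-byte aligned: a fast path for aligned
+    // addresses, a two-halfword path for 2-byte-aligned addresses, and a byte-wise
+    // path otherwise (SH4 faults on misaligned data accesses).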
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        RegisterID scr = claimScratch();
+        Jump m_jump;
+        JumpList end;
+
+        loadEffectiveAddress(address, scr);
+
+        RegisterID scr1 = claimScratch();
+        if (dest != SH4Registers::r0)
+            move(SH4Registers::r0, scr1);
+
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 58, sizeof(uint32_t));
+        move(scr, SH4Registers::r0);
+        m_assembler.testlImm8r(0x3, SH4Registers::r0);
+        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+
+        if (dest != SH4Registers::r0)
+            move(scr1, SH4Registers::r0);
+
+        load32(scr, dest);
+        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+        m_assembler.nop();
+        m_jump.link(this);
+        m_assembler.testlImm8r(0x1, SH4Registers::r0);
+
+        if (dest != SH4Registers::r0)
+            move(scr1, SH4Registers::r0);
+
+        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+        load16PostInc(scr, scr1);
+        load16(scr, dest);
+        m_assembler.shllImm8r(16, dest);
+        or32(scr1, dest);
+        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+        m_assembler.nop();
+        m_jump.link(this);
+        load8PostInc(scr, scr1);
+        load16PostInc(scr, dest);
+        m_assembler.shllImm8r(8, dest);
+        or32(dest, scr1);
+        load8(scr, dest);
+        m_assembler.shllImm8r(8, dest);
+        m_assembler.shllImm8r(16, dest);
+        or32(scr1, dest);
+        end.link(this);
+
+        releaseScratch(scr);
+        releaseScratch(scr1);
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        RegisterID scr = scratchReg3;
+        load32WithUnalignedHalfWords(left, scr);
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+            m_assembler.testlRegReg(scr, scr);
+        else
+            compare32(right.m_value, scr, cond);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.movImm8(0, scratchReg3);
+        convertInt32ToDouble(scratchReg3, scratch);
+        return branchDouble(DoubleNotEqual, reg, scratch);
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.movImm8(0, scratchReg3);
+        convertInt32ToDouble(scratchReg3, scratch);
+        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+    }
+
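+    // SH4 only provides dcmppeq/dcmppgt, so the remaining conditions and their
+    // unordered variants are synthesized here, using self-comparison of each
+    // operand as an explicit NaN check where needed.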
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        if (cond == DoubleEqual) {
+            m_assembler.dcmppeq(right, left);
+            return branchTrue();
+        }
+
+        if (cond == DoubleNotEqual) {
+            JumpList end;
+            m_assembler.dcmppeq(left, left);
+            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, right);
+            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, left);
+            Jump m_jump = branchFalse();
+            end.link(this);
+            return m_jump;
+        }
+
+        if (cond == DoubleGreaterThan) {
+            m_assembler.dcmppgt(right, left);
+            return branchTrue();
+        }
+
+        if (cond == DoubleGreaterThanOrEqual) {
+            JumpList end;
+            m_assembler.dcmppeq(left, left);
+            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, right);
+            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppgt(left, right);
+            Jump m_jump = branchFalse();
+            end.link(this);
+            return m_jump;
+        }
+
+        if (cond == DoubleLessThan) {
+            m_assembler.dcmppgt(left, right);
+            return branchTrue();
+        }
+
+        if (cond == DoubleLessThanOrEqual) {
+            JumpList end;
+            m_assembler.dcmppeq(left, left);
+            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, right);
+            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppgt(right, left);
+            Jump m_jump = branchFalse();
+            end.link(this);
+            return m_jump;
+        }
+
+        if (cond == DoubleEqualOrUnordered) {
+            JumpList takeBranch;
+            m_assembler.dcmppeq(left, left);
+            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, right);
+            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(left, right);
+            m_assembler.branch(BF_OPCODE, 2);
+            takeBranch.link(this);
+            return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+        }
+
+        if (cond == DoubleGreaterThanOrUnordered) {
+            JumpList takeBranch;
+            m_assembler.dcmppeq(left, left);
+            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, right);
+            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppgt(right, left);
+            m_assembler.branch(BF_OPCODE, 2);
+            takeBranch.link(this);
+            return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+        }
+
+        if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+            m_assembler.dcmppgt(left, right);
+            return branchFalse();
+        }
+
+        if (cond == DoubleLessThanOrUnordered) {
+            JumpList takeBranch;
+            m_assembler.dcmppeq(left, left);
+            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppeq(right, right);
+            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
+            m_assembler.dcmppgt(left, right);
+            m_assembler.branch(BF_OPCODE, 2);
+            takeBranch.link(this);
+            return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+        }
+
+        if (cond == DoubleLessThanOrEqualOrUnordered) {
+            m_assembler.dcmppgt(right, left);
+            return branchFalse();
+        }
+
+        ASSERT(cond == DoubleNotEqualOrUnordered);
+        m_assembler.dcmppeq(right, left);
+        return branchFalse();
+    }
+
+    Jump branchTrue()
+    {
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+        m_assembler.branch(BF_OPCODE, 2);
+        return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+    }
+
+    Jump branchFalse()
+    {
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+        m_assembler.branch(BT_OPCODE, 2);
+        return Jump(m_assembler.extraInstrForBranch(scratchReg3));
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        RegisterID scr = claimScratch();
+        move(left.index, scr);
+        lshift32(TrustedImm32(left.scale), scr);
+        add32(left.base, scr);
+        load32(scr, left.offset, scr);
+        compare32(right.m_value, scr, cond);
+        releaseScratch(scr);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        moveDouble(src, dest);
+        m_assembler.dsqrt(dest);
+    }
+    
+    void absDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        moveDouble(src, dest);
+        m_assembler.dabs(dest);
+    }
+
+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        RegisterID addressTempRegister = claimScratch();
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
+        Jump jmp = branchTest32(cond, addressTempRegister, mask8);
+        releaseScratch(addressTempRegister);
+        return jmp;
+    }
+
+    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        RegisterID addressTempRegister = claimScratch();
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
+        Jump jmp = branchTest32(cond, addressTempRegister, mask8);
+        releaseScratch(addressTempRegister);
+        return jmp;
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        RegisterID addressTempRegister = claimScratch();
+        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
+        Jump jmp = branchTest32(cond, addressTempRegister, mask8);
+        releaseScratch(addressTempRegister);
+        return jmp;
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        RegisterID addressTempRegister = claimScratch();
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        Jump jmp = branch32(cond, addressTempRegister, right8);
+        releaseScratch(addressTempRegister);
+        return jmp;
+    }
+
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        RegisterID addressTempRegister = claimScratch();
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        Jump jmp = branch32(cond, addressTempRegister, right8);
+        releaseScratch(addressTempRegister);
+        return jmp;
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        RegisterID addressTempRegister = claimScratch();
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
+        compare32(cond, addressTempRegister, right8, dest);
+        releaseScratch(addressTempRegister);
+    }
+
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
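+    // FTRC saturates to 0x7fffffff on positive overflow and to 0x80000000 on negative
+    // overflow, so a truncation is treated as failed when the result equals either value.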
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        Jump result;
+        truncateDoubleToInt32(src, dest);
+        RegisterID intscr = claimScratch();
+        m_assembler.loadConstant(0x7fffffff, intscr);
+        m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 12, sizeof(uint32_t));
+        if (branchType == BranchIfTruncateFailed) {
+            m_assembler.branch(BT_OPCODE, 2);
+            m_assembler.addlImm8r(1, intscr);
+            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+            result = branchTrue();
+        } else {
+            Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear);
+            m_assembler.addlImm8r(1, intscr);
+            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+            result = branchFalse();
+            out.link(this);
+        }
+        releaseScratch(intscr);
+        return result;
+    }
+
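+    // Unsigned truncation: bias the input down by 2^31 (by adding the double value of
+    // INT32_MIN), truncate with the signed FTRC, then undo the bias on the integer result
+    // (subtracting 0x80000000, which equals adding 2^31 modulo 2^32).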
+    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        Jump result;
+        RegisterID intscr = claimScratch();
+        m_assembler.loadConstant(0x80000000, intscr);
+        convertInt32ToDouble(intscr, fscratch);
+        addDouble(src, fscratch);
+        truncateDoubleToInt32(fscratch, dest);
+        m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 16, sizeof(uint32_t));
+        if (branchType == BranchIfTruncateFailed) {
+            m_assembler.branch(BT_OPCODE, 4);
+            m_assembler.addlImm8r(-1, intscr);
+            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+            m_assembler.addlImm8r(1, intscr);
+            m_assembler.sublRegReg(intscr, dest);
+            result = branchTrue();
+        } else {
+            Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear);
+            m_assembler.addlImm8r(-1, intscr);
+            m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal));
+            m_assembler.addlImm8r(1, intscr);
+            m_assembler.sublRegReg(intscr, dest);
+            result = branchFalse();
+            out.link(this);
+        }
+        releaseScratch(intscr);
+        return result;
+    }
+
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.ftrcdrmfpul(src);
+        m_assembler.stsfpulReg(dest);
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        RegisterID intscr = claimScratch();
+        m_assembler.loadConstant(0x80000000, intscr);
+        convertInt32ToDouble(intscr, fscratch);
+        addDouble(src, fscratch);
+        m_assembler.ftrcdrmfpul(fscratch);
+        m_assembler.stsfpulReg(dest);
+        m_assembler.sublRegReg(intscr, dest);
+        releaseScratch(intscr);
+    }
+
+    // Stack manipulation operations
+
+    void pop(RegisterID dest)
+    {
+        m_assembler.popReg(dest);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.pushReg(src);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value, scr);
+        push(scr);
+        releaseScratch(scr);
+    }
+
+    // Register move operations
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.loadConstant(imm.m_value, dest);
+    }
+
+    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+    {
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
+        DataLabelPtr dataLabel(this);
+        m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
+        return dataLabel;
+    }
+
+    DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
+        DataLabel32 dataLabel(this);
+        m_assembler.loadConstantUnReusable(static_cast<uint32_t>(initialValue.m_value), dest);
+        return dataLabel;
+    }
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.movlRegReg(src, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        m_assembler.loadConstant(imm.asIntptr(), dest);
+    }
+
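+    // Exchange two registers without a scratch register using the three-XOR swap idiom.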
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        if (reg1 != reg2) {
+            xor32(reg1, reg2);
+            xor32(reg2, reg1);
+            xor32(reg1, reg2);
+        }
+    }
+
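+    // SH4 compares only set the T bit: MOVT copies it into dest; the NotEqual case instead
+    // materializes the result with a conditional skip over the constant load below.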
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+        if (cond != NotEqual) {
+            m_assembler.movt(dest);
+            return;
+        }
+
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+        m_assembler.movImm8(0, dest);
+        m_assembler.branch(BT_OPCODE, 0);
+        m_assembler.movImm8(1, dest);
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        if (left != dest) {
+            move(right, dest);
+            compare32(cond, left, dest, dest);
+            return;
+        }
+
+        RegisterID scr = claimScratch();
+        move(right, scr);
+        compare32(cond, left, scr, dest);
+        releaseScratch(scr);
+    }
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dest);
+        if ((mask8.m_value & 0xff) == 0xff)
+            compare32(0, dest, static_cast<RelationalCondition>(cond));
+        else
+            testlImm(mask8.m_value, dest);
+        if (cond != NonZero) {
+            m_assembler.movt(dest);
+            return;
+        }
+
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+        m_assembler.movImm8(0, dest);
+        m_assembler.branch(BT_OPCODE, 0);
+        m_assembler.movImm8(1, dest);
+    }
+
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+
+        load32(address, dest);
+        if (mask.m_value == -1)
+            compare32(0, dest, static_cast<RelationalCondition>(cond));
+        else
+            testlImm(mask.m_value, dest);
+        if (cond != NonZero) {
+            m_assembler.movt(dest);
+            return;
+        }
+
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+        m_assembler.movImm8(0, dest);
+        m_assembler.branch(BT_OPCODE, 0);
+        m_assembler.movImm8(1, dest);
+    }
+
+    void loadPtrLinkReg(ImplicitAddress address)
+    {
+        RegisterID scr = claimScratch();
+        load32(address, scr);
+        m_assembler.ldspr(scr);
+        releaseScratch(scr);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+        /* BT label => BF off
+           nop         LDR reg
+           nop         braf @reg
+           nop         nop
+         */
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+            m_assembler.testlRegReg(left, left);
+        else
+            compare32(right.m_value, left, cond);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        compare32(right.offset, right.base, left, cond);
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        compare32(right, left.offset, left.base, cond);
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        compare32(right.m_value, left.offset, left.base, cond);
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        RegisterID scr = claimScratch();
+
+        load32(left.m_ptr, scr);
+        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+        releaseScratch(scr);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        RegisterID addressTempRegister = claimScratch();
+
+        move(TrustedImmPtr(left.m_ptr), addressTempRegister);
+        m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
+        compare32(right.m_value, addressTempRegister, cond);
+        releaseScratch(addressTempRegister);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        RegisterID lefttmp = claimScratch();
+
+        loadEffectiveAddress(left, lefttmp);
+
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, lefttmp, lefttmp);
+        RegisterID righttmp = claimScratch();
+        m_assembler.loadConstant(right8.m_value, righttmp);
+
+        Jump result = branch32(cond, lefttmp, righttmp);
+        releaseScratch(lefttmp);
+        releaseScratch(righttmp);
+        return result;
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+
+        m_assembler.testlRegReg(reg, mask);
+
+        if (cond == NonZero) // NotEqual
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+
+        if (mask.m_value == -1)
+            m_assembler.testlRegReg(reg, reg);
+        else
+            testlImm(mask.m_value, reg);
+
+        if (cond == NonZero) // NotEqual
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+
+        if (mask.m_value == -1)
+            compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
+        else
+            testImm(mask.m_value, address.offset, address.base);
+
+        if (cond == NonZero) // NotEqual
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+
+        RegisterID scr = claimScratch();
+
+        move(address.index, scr);
+        lshift32(TrustedImm32(address.scale), scr);
+        add32(address.base, scr);
+        load32(scr, address.offset, scr);
+
+        if (mask.m_value == -1)
+            m_assembler.testlRegReg(scr, scr);
+        else
+            testlImm(mask.m_value, scr);
+
+        releaseScratch(scr);
+
+        if (cond == NonZero) // NotEqual
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump jump()
+    {
+        return Jump(m_assembler.jmp());
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.jmpReg(target);
+    }
+
+    void jump(Address address)
+    {
+        RegisterID scr = claimScratch();
+        load32(address, scr);
+        m_assembler.jmpReg(scr);
+        releaseScratch(scr);
+    }
+
+    void jump(AbsoluteAddress address)
+    {
+        RegisterID scr = claimScratch();
+
+        move(TrustedImmPtr(address.m_ptr), scr);
+        m_assembler.movlMemReg(scr, scr);
+        m_assembler.jmpReg(scr);
+        releaseScratch(scr);
+    }
+
+    // Arithmetic control flow operations
+
+    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow)
+            return branchMul32(cond, srcDest, TrustedImm32(-1), srcDest);
+
+        neg32(srcDest);
+
+        if (cond == Signed) {
+            m_assembler.cmppz(srcDest);
+            return branchFalse();
+        }
+
+        compare32(0, srcDest, Equal);
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow) {
+            m_assembler.addvlRegReg(src, dest);
+            return branchTrue();
+        }
+
+        m_assembler.addlRegReg(src, dest);
+
+        if ((cond == Signed) || (cond == PositiveOrZero)) {
+            m_assembler.cmppz(dest);
+            return (cond == Signed) ? branchFalse() : branchTrue();
+        }
+
+        compare32(0, dest, Equal);
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow) {
+            if (src1 == dest)
+                m_assembler.addvlRegReg(src2, dest);
+            else {
+                move(src2, dest);
+                m_assembler.addvlRegReg(src1, dest);
+            }
+            return branchTrue();
+        }
+
+        add32(src1, src2, dest);
+
+        if ((cond == Signed) || (cond == PositiveOrZero)) {
+            m_assembler.cmppz(dest);
+            return (cond == Signed) ? branchFalse() : branchTrue();
+        }
+
+        compare32(0, dest, Equal);
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+        RegisterID immval = claimScratch();
+        move(imm, immval);
+        Jump result = branchAdd32(cond, immval, dest);
+        releaseScratch(immval);
+        return result;
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+        move(src, dest);
+
+        if (cond == Overflow) {
+            move(imm, scratchReg3);
+            m_assembler.addvlRegReg(scratchReg3, dest);
+            return branchTrue();
+        }
+
+        add32(imm, dest);
+
+        if ((cond == Signed) || (cond == PositiveOrZero)) {
+            m_assembler.cmppz(dest);
+            return (cond == Signed) ? branchFalse() : branchTrue();
+        }
+
+        compare32(0, dest, Equal);
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+        bool result;
+
+        move(imm, scratchReg3);
+        RegisterID destptr = claimScratch();
+        RegisterID destval = claimScratch();
+        move(TrustedImmPtr(dest.m_ptr), destptr);
+        m_assembler.movlMemReg(destptr, destval);
+        if (cond == Overflow) {
+            m_assembler.addvlRegReg(scratchReg3, destval);
+            result = true;
+        } else {
+            m_assembler.addlRegReg(scratchReg3, destval);
+            if ((cond == Signed) || (cond == PositiveOrZero)) {
+                m_assembler.cmppz(destval);
+                result = (cond == PositiveOrZero);
+            } else {
+                m_assembler.testlRegReg(destval, destval);
+                result = (cond != NonZero);
+            }
+        }
+        m_assembler.movlRegMem(destval, destptr);
+        releaseScratch(destval);
+        releaseScratch(destptr);
+        return result ? branchTrue() : branchFalse();
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow) {
+            RegisterID srcVal = claimScratch();
+            load32(src, srcVal);
+            m_assembler.addvlRegReg(srcVal, dest);
+            releaseScratch(srcVal);
+            return branchTrue();
+        }
+
+        add32(src, dest);
+
+        if ((cond == Signed) || (cond == PositiveOrZero)) {
+            m_assembler.cmppz(dest);
+            return (cond == Signed) ? branchFalse() : branchTrue();
+        }
+
+        compare32(0, dest, Equal);
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
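+    // Overflow check for 32-bit multiply: DMULS.L leaves the full 64-bit signed product in
+    // MACH:MACL; the multiply overflowed iff MACH differs from the sign extension of MACL,
+    // which is what the Overflow paths below compare before branching.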
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow) {
+            RegisterID scrsign = claimScratch();
+            RegisterID msbres = claimScratch();
+            m_assembler.dmulslRegReg(src, dest);
+            m_assembler.stsmacl(dest);
+            m_assembler.cmppz(dest);
+            m_assembler.movt(scrsign);
+            m_assembler.addlImm8r(-1, scrsign);
+            m_assembler.stsmach(msbres);
+            m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
+            releaseScratch(msbres);
+            releaseScratch(scrsign);
+            return branchFalse();
+        }
+
+        mul32(src, dest);
+
+        if (cond == Signed) {
+            m_assembler.cmppz(dest);
+            return branchFalse();
+        }
+
+        compare32(0, dest, static_cast<RelationalCondition>(cond));
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow) {
+            RegisterID scrsign = claimScratch();
+            RegisterID msbres = claimScratch();
+            m_assembler.dmulslRegReg(src1, src2);
+            m_assembler.stsmacl(dest);
+            m_assembler.cmppz(dest);
+            m_assembler.movt(scrsign);
+            m_assembler.addlImm8r(-1, scrsign);
+            m_assembler.stsmach(msbres);
+            m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
+            releaseScratch(msbres);
+            releaseScratch(scrsign);
+            return branchFalse();
+        }
+
+        mul32(src1, src2, dest);
+
+        if (cond == Signed) {
+            m_assembler.cmppz(dest);
+            return branchFalse();
+        }
+
+        compare32(0, dest, Equal);
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        if (src == dest) {
+            move(imm, scratchReg3);
+            return branchMul32(cond, scratchReg3, dest);
+        }
+
+        move(imm, dest);
+        return branchMul32(cond, src, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        if (cond == Overflow) {
+            m_assembler.subvlRegReg(src, dest);
+            return branchTrue();
+        }
+
+        sub32(src, dest);
+
+        if (cond == Signed) {
+            m_assembler.cmppz(dest);
+            return branchFalse();
+        }
+
+        compare32(0, dest, static_cast<RelationalCondition>(cond));
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        RegisterID immval = claimScratch();
+        move(imm, immval);
+        Jump result = branchSub32(cond, immval, dest);
+        releaseScratch(immval);
+        return result;
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        move(src, dest);
+        return branchSub32(cond, imm, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        if (src2 != dest) {
+            move(src1, dest);
+            return branchSub32(cond, src2, dest);
+        }
+
+        if (cond == Overflow) {
+            RegisterID tmpval = claimScratch();
+            move(src1, tmpval);
+            m_assembler.subvlRegReg(src2, tmpval);
+            move(tmpval, dest);
+            releaseScratch(tmpval);
+            return branchTrue();
+        }
+
+        RegisterID tmpval = claimScratch();
+        move(src1, tmpval);
+        sub32(src2, tmpval);
+        move(tmpval, dest);
+        releaseScratch(tmpval);
+
+        if (cond == Signed) {
+            m_assembler.cmppz(dest);
+            return branchFalse();
+        }
+
+        compare32(0, dest, static_cast<RelationalCondition>(cond));
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
+    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+        or32(src, dest);
+
+        if (cond == Signed) {
+            m_assembler.cmppz(dest);
+            return branchFalse();
+        }
+
+        compare32(0, dest, static_cast<RelationalCondition>(cond));
+        return (cond == NonZero) ? branchFalse() : branchTrue();
+    }
+
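+    // Convert by truncating and converting back: the conversion fails if the round trip is
+    // inexact, or (when negZeroCheck is set) if the result is 0, since -0 also truncates to 0.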
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
+    {
+        truncateDoubleToInt32(src, dest);
+        convertInt32ToDouble(dest, fscratch);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));
+
+        if (negZeroCheck)
+            failureCases.append(branch32(Equal, dest, TrustedImm32(0)));
+    }
+
+    void neg32(RegisterID dst)
+    {
+        m_assembler.neg(dst, dst);
+    }
+
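+    // SHLD shifts left for a positive amount and logically right for a negative one, so a
+    // logical right shift is performed by negating the masked shift count.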
+    void urshift32(RegisterID shiftamount, RegisterID dest)
+    {
+        RegisterID shiftTmp = claimScratch();
+        m_assembler.loadConstant(0x1f, shiftTmp);
+        m_assembler.andlRegReg(shiftamount, shiftTmp);
+        m_assembler.neg(shiftTmp, shiftTmp);
+        m_assembler.shldRegReg(dest, shiftTmp);
+        releaseScratch(shiftTmp);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        move(src, dest);
+        urshift32(shiftAmount, dest);
+    }
+
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        int immMasked = imm.m_value & 0x1f;
+        if (!immMasked)
+            return;
+
+        if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
+            m_assembler.shlrImm8r(immMasked, dest);
+            return;
+        }
+
+        RegisterID shiftTmp = claimScratch();
+        m_assembler.loadConstant(-immMasked, shiftTmp);
+        m_assembler.shldRegReg(dest, shiftTmp);
+        releaseScratch(shiftTmp);
+    }
+
+    void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+    {
+        move(src, dest);
+        urshift32(shiftamount, dest);
+    }
+
+    Call call()
+    {
+        return Call(m_assembler.call(), Call::Linkable);
+    }
+
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jump(), Call::LinkableNearTail);
+    }
+
+    Call nearCall()
+    {
+        return Call(m_assembler.call(), Call::LinkableNear);
+    }
+
+    Call call(RegisterID target)
+    {
+        return Call(m_assembler.call(target), Call::None);
+    }
+
+    void call(Address address)
+    {
+        RegisterID target = claimScratch();
+        load32(address.base, address.offset, target);
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
+        m_assembler.branch(JSR_OPCODE, target);
+        m_assembler.nop();
+        releaseScratch(target);
+    }
+
+    void breakpoint()
+    {
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
+        m_assembler.bkpt();
+        m_assembler.nop();
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        RegisterID dataTempRegister = claimScratch();
+
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
+        releaseScratch(dataTempRegister);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        RegisterID scr = claimScratch();
+
+        m_assembler.loadConstant(left.offset, scr);
+        m_assembler.addlRegReg(left.base, scr);
+        m_assembler.movlMemReg(scr, scr);
+        RegisterID scr1 = claimScratch();
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
+        dataLabel = moveWithPatch(initialRightValue, scr1);
+        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+        releaseScratch(scr);
+        releaseScratch(scr1);
+
+        if (cond == NotEqual)
+            return branchFalse();
+        return branchTrue();
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        RegisterID scr = claimScratch();
+
+        m_assembler.loadConstant(left.offset, scr);
+        m_assembler.addlRegReg(left.base, scr);
+        m_assembler.movlMemReg(scr, scr);
+        RegisterID scr1 = claimScratch();
+        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t));
+        dataLabel = moveWithPatch(initialRightValue, scr1);
+        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+        releaseScratch(scr);
+        releaseScratch(scr1);
+
+        return (cond == NotEqual) ? branchFalse() : branchTrue();
+    }
+
+    void ret()
+    {
+        m_assembler.ret();
+        m_assembler.nop();
+    }
+
+    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        RegisterID scr = claimScratch();
+        DataLabelPtr label = moveWithPatch(initialValue, scr);
+        store32(scr, address);
+        releaseScratch(scr);
+        return label;
+    }
+
+    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+    int sizeOfConstantPool()
+    {
+        return m_assembler.sizeOfConstantPool();
+    }
+
+    Call tailRecursiveCall()
+    {
+        RegisterID scr = claimScratch();
+
+        m_assembler.loadConstantUnReusable(0x0, scr, true);
+        Jump m_jump = Jump(m_assembler.jmp(scr));
+        releaseScratch(scr);
+
+        return Call::fromTailJump(m_jump);
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        return tailRecursiveCall();
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+
+    void memoryFence()
+    {
+        m_assembler.synco();
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), SH4Registers::r0);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), SH4Registers::r1);
+        abortWithReason(reason);
+    }
+
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
+    }
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        SH4Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return SH4Assembler::maxJumpReplacementSize();
+    }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        return label.labelAtOffset(0);
+    }
+
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+    {
+        SH4Assembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart.dataLocation(), rd, reinterpret_cast<int>(initialValue));
+    }
+
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }
+
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }
+
+protected:
+    SH4Assembler::Condition SH4Condition(RelationalCondition cond)
+    {
+        return static_cast<SH4Assembler::Condition>(cond);
+    }
+
+    SH4Assembler::Condition SH4Condition(ResultCondition cond)
+    {
+        return static_cast<SH4Assembler::Condition>(cond);
+    }
+private:
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Tail))
+            SH4Assembler::linkJump(code, call.m_label, function.value());
+        else
+            SH4Assembler::linkCall(code, call.m_label, function.value());
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MacroAssemblerX86.h b/assembler/MacroAssemblerX86.h
new file mode 100644
index 0000000..75f3545
--- /dev/null
+++ b/assembler/MacroAssemblerX86.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(X86)
+
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Common {
+public:
+    static const unsigned numGPRs = 8;
+    static const unsigned numFPRs = 8;
+    
+    static const Scale ScalePtr = TimesFour;
+
+    using MacroAssemblerX86Common::add32;
+    using MacroAssemblerX86Common::and32;
+    using MacroAssemblerX86Common::branchAdd32;
+    using MacroAssemblerX86Common::branchSub32;
+    using MacroAssemblerX86Common::sub32;
+    using MacroAssemblerX86Common::or32;
+    using MacroAssemblerX86Common::load32;
+    using MacroAssemblerX86Common::load8;
+    using MacroAssemblerX86Common::store32;
+    using MacroAssemblerX86Common::store8;
+    using MacroAssemblerX86Common::branch32;
+    using MacroAssemblerX86Common::call;
+    using MacroAssemblerX86Common::jump;
+    using MacroAssemblerX86Common::addDouble;
+    using MacroAssemblerX86Common::loadDouble;
+    using MacroAssemblerX86Common::storeDouble;
+    using MacroAssemblerX86Common::convertInt32ToDouble;
+    using MacroAssemblerX86Common::branch8;
+    using MacroAssemblerX86Common::branchTest8;
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.leal_mr(imm.m_value, src, dest);
+    }
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.addl_im(imm.m_value, address.m_ptr);
+    }
+    
+    void add32(AbsoluteAddress address, RegisterID dest)
+    {
+        m_assembler.addl_mr(address.m_ptr, dest);
+    }
+    
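+    // 64-bit add of a sign-extended 32-bit immediate on x86-32: add the low word, then
+    // add-with-carry the immediate's sign word (imm >> 31) into the high word.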
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.addl_im(imm.m_value, address.m_ptr);
+        m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
+    }
+
+    void and32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.andl_im(imm.m_value, address.m_ptr);
+    }
+    
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.orl_im(imm.m_value, address.m_ptr);
+    }
+    
+    void or32(RegisterID reg, AbsoluteAddress address)
+    {
+        m_assembler.orl_rm(reg, address.m_ptr);
+    }
+    
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.subl_im(imm.m_value, address.m_ptr);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        m_assembler.movl_mr(address, dest);
+    }
+    
+    void load8(const void* address, RegisterID dest)
+    {
+        m_assembler.movzbl_mr(address, dest);
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), X86Registers::eax);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), X86Registers::edx);
+        abortWithReason(reason);
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+        m_assembler.movl_mr(address.offset, address.base, dest);
+        return result;
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        m_assembler.addsd_mr(address.m_ptr, dest);
+    }
+
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
+    {
+        ASSERT(isSSE2Present());
+        ASSERT(address.m_value);
+        m_assembler.movsd_rm(src, address.m_value);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+    }
+
+    void store32(TrustedImm32 imm, void* address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address);
+    }
+
+    void store32(RegisterID src, void* address)
+    {
+        m_assembler.movl_rm(src, address);
+    }
+    
+    void store8(RegisterID src, void* address)
+    {
+        m_assembler.movb_rm(src, address);
+    }
+
+    void store8(TrustedImm32 imm, void* address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.movb_i8m(imm8.m_value, address);
+    }
+    
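+    // Split a double into two GPRs: PEXTRW extracts the two high 16-bit words (bits 32-63),
+    // which are recombined into dest2, while the low 32 bits go directly into dest1.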
+    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.pextrw_irr(3, src, dest1);
+        m_assembler.pextrw_irr(2, src, dest2);
+        lshift32(TrustedImm32(16), dest1);
+        or32(dest1, dest2);
+        moveFloatTo32(src, dest1);
+    }
+
+    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+    {
+        move32ToFloat(src1, dest);
+        move32ToFloat(src2, scratch);
+        lshiftPacked(TrustedImm32(32), scratch);
+        orPacked(scratch, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        m_assembler.addl_im(imm.m_value, dest.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        m_assembler.subl_im(imm.m_value, dest.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        m_assembler.cmpl_rm(right, left.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        m_assembler.cmpl_im(right.m_value, left.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Call call()
+    {
+        return Call(m_assembler.call(), Call::Linkable);
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(AbsoluteAddress address)
+    {
+        m_assembler.jmp_m(address.m_ptr);
+    }
+
+    Call tailRecursiveCall()
+    {
+        return Call::fromTailJump(jump());
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        return Call::fromTailJump(oldJump);
+    }
+
+
+    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+        return DataLabelPtr(this);
+    }
+    
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
+            m_assembler.cmpb_im(0, address.m_ptr);
+        else
+            m_assembler.testb_im(mask8.m_value, address.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
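+    // The force32 compare forms always emit a full 4-byte immediate so the value recorded by
+    // the DataLabelPtr/DataLabel32 can be repatched later.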
+    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        padBeforePatch();
+        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
+        dataLabel = DataLabelPtr(this);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        padBeforePatch();
+        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
+        dataLabel = DataLabelPtr(this);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        padBeforePatch();
+        m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base);
+        dataLabel = DataLabel32(this);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        padBeforePatch();
+        m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
+        return DataLabelPtr(this);
+    }
+
+    static bool supportsFloatingPoint() { return isSSE2Present(); }
+    static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
+    static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
+    static bool supportsFloatingPointAbs() { return isSSE2Present(); }
+    
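+    // A near call ends with a rel32 displacement; the call target is that displacement added
+    // to the address immediately after the call (which is what dataLocation() points at).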
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
+        return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<intptr_t>(call.dataLocation()) + offset));
+    }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
+    
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        const int opcodeBytes = 1;
+        const int modRMBytes = 1;
+        const int immediateBytes = 4;
+        const int totalBytes = opcodeBytes + modRMBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+    {
+        const int opcodeBytes = 1;
+        const int modRMBytes = 1;
+        const int offsetBytes = 0;
+        const int immediateBytes = 4;
+        const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+    {
+        const int opcodeBytes = 1;
+        const int modRMBytes = 1;
+        const int offsetBytes = 0;
+        const int immediateBytes = 4;
+        const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+    {
+        X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
+    }
+
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue)
+    {
+        ASSERT(!address.offset);
+        X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue)
+    {
+        ASSERT(!address.offset);
+        X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base);
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+private:
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (call.isFlagSet(Call::Tail))
+            X86Assembler::linkJump(code, call.m_label, function.value());
+        else
+            X86Assembler::linkCall(code, call.m_label, function.value());
+    }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MacroAssemblerX86Common.cpp b/assembler/MacroAssemblerX86Common.cpp
new file mode 100644
index 0000000..528c60f
--- /dev/null
+++ b/assembler/MacroAssemblerX86Common.cpp
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
+#include "MacroAssemblerX86Common.h"
+
+#include <wtf/InlineASM.h>
+
+namespace JSC {
+
+#if ENABLE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+#if COMPILER(GCC_OR_CLANG)
+
+// The following are offsets for MacroAssemblerX86Common::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#if CPU(X86)
+#define PTR_SIZE 4
+#else // CPU(X86_64)
+#define PTR_SIZE 8
+#endif
+
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPR_OFFSET (3 * PTR_SIZE)
+#define PROBE_CPU_EAX_OFFSET (PROBE_FIRST_GPR_OFFSET + (0 * PTR_SIZE))
+#define PROBE_CPU_ECX_OFFSET (PROBE_FIRST_GPR_OFFSET + (1 * PTR_SIZE))
+#define PROBE_CPU_EDX_OFFSET (PROBE_FIRST_GPR_OFFSET + (2 * PTR_SIZE))
+#define PROBE_CPU_EBX_OFFSET (PROBE_FIRST_GPR_OFFSET + (3 * PTR_SIZE))
+#define PROBE_CPU_ESP_OFFSET (PROBE_FIRST_GPR_OFFSET + (4 * PTR_SIZE))
+#define PROBE_CPU_EBP_OFFSET (PROBE_FIRST_GPR_OFFSET + (5 * PTR_SIZE))
+#define PROBE_CPU_ESI_OFFSET (PROBE_FIRST_GPR_OFFSET + (6 * PTR_SIZE))
+#define PROBE_CPU_EDI_OFFSET (PROBE_FIRST_GPR_OFFSET + (7 * PTR_SIZE))
+
+#if CPU(X86)
+#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE))
+#else // CPU(X86_64)
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPR_OFFSET + (9 * PTR_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPR_OFFSET + (10 * PTR_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPR_OFFSET + (11 * PTR_SIZE))
+#define PROBE_CPU_R12_OFFSET (PROBE_FIRST_GPR_OFFSET + (12 * PTR_SIZE))
+#define PROBE_CPU_R13_OFFSET (PROBE_FIRST_GPR_OFFSET + (13 * PTR_SIZE))
+#define PROBE_CPU_R14_OFFSET (PROBE_FIRST_GPR_OFFSET + (14 * PTR_SIZE))
+#define PROBE_CPU_R15_OFFSET (PROBE_FIRST_GPR_OFFSET + (15 * PTR_SIZE))
+#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (16 * PTR_SIZE))
+#endif // CPU(X86_64)
+
+#define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE))
+#define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE))
+#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE))
+
+#define XMM_SIZE 8
+#define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE))
+#define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE))
+#define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE))
+#define PROBE_CPU_XMM3_OFFSET (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE))
+#define PROBE_CPU_XMM4_OFFSET (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE))
+#define PROBE_CPU_XMM5_OFFSET (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE))
+#define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE))
+#define PROBE_CPU_XMM7_OFFSET (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE))
+
+#if CPU(X86)
+#define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE)
+#else // CPU(X86_64)
+#define PROBE_CPU_XMM8_OFFSET (PROBE_FIRST_XMM_OFFSET + (8 * XMM_SIZE))
+#define PROBE_CPU_XMM9_OFFSET (PROBE_FIRST_XMM_OFFSET + (9 * XMM_SIZE))
+#define PROBE_CPU_XMM10_OFFSET (PROBE_FIRST_XMM_OFFSET + (10 * XMM_SIZE))
+#define PROBE_CPU_XMM11_OFFSET (PROBE_FIRST_XMM_OFFSET + (11 * XMM_SIZE))
+#define PROBE_CPU_XMM12_OFFSET (PROBE_FIRST_XMM_OFFSET + (12 * XMM_SIZE))
+#define PROBE_CPU_XMM13_OFFSET (PROBE_FIRST_XMM_OFFSET + (13 * XMM_SIZE))
+#define PROBE_CPU_XMM14_OFFSET (PROBE_FIRST_XMM_OFFSET + (14 * XMM_SIZE))
+#define PROBE_CPU_XMM15_OFFSET (PROBE_FIRST_XMM_OFFSET + (15 * XMM_SIZE))
+#define PROBE_SIZE (PROBE_CPU_XMM15_OFFSET + XMM_SIZE)
+#endif // CPU(X86_64)
+
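+// For orientation, the offsets above mirror a ProbeContext shaped roughly like
+// the sketch below (field names are taken from the COMPILE_ASSERTs that follow;
+// exact types are omitted, and the authoritative definition lives in
+// MacroAssemblerX86Common.h):
+//
+//     struct ProbeContext {
+//         ProbeFunction probeFunction;
+//         void* arg1;
+//         void* arg2;
+//         CPUState cpu; // eax..edi (plus r8..r15 on X86_64), then eip, eflags, then the xmm registers
+//     };
+//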
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerX86Common::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eax) == PROBE_CPU_EAX_OFFSET, ProbeContext_cpu_eax_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ecx) == PROBE_CPU_ECX_OFFSET, ProbeContext_cpu_ecx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edx) == PROBE_CPU_EDX_OFFSET, ProbeContext_cpu_edx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebx) == PROBE_CPU_EBX_OFFSET, ProbeContext_cpu_ebx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esp) == PROBE_CPU_ESP_OFFSET, ProbeContext_cpu_esp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebp) == PROBE_CPU_EBP_OFFSET, ProbeContext_cpu_ebp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esi) == PROBE_CPU_ESI_OFFSET, ProbeContext_cpu_esi_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edi) == PROBE_CPU_EDI_OFFSET, ProbeContext_cpu_edi_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eip) == PROBE_CPU_EIP_OFFSET, ProbeContext_cpu_eip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eflags) == PROBE_CPU_EFLAGS_OFFSET, ProbeContext_cpu_eflags_offset_matches_ctiMasmProbeTrampoline);
+
+#if CPU(X86_64)
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r12) == PROBE_CPU_R12_OFFSET, ProbeContext_cpu_r12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r13) == PROBE_CPU_R13_OFFSET, ProbeContext_cpu_r13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r14) == PROBE_CPU_R14_OFFSET, ProbeContext_cpu_r14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r15) == PROBE_CPU_R15_OFFSET, ProbeContext_cpu_r15_offset_matches_ctiMasmProbeTrampoline);
+#endif // CPU(X86_64)
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm0) == PROBE_CPU_XMM0_OFFSET, ProbeContext_cpu_xmm0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm1) == PROBE_CPU_XMM1_OFFSET, ProbeContext_cpu_xmm1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm2) == PROBE_CPU_XMM2_OFFSET, ProbeContext_cpu_xmm2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm3) == PROBE_CPU_XMM3_OFFSET, ProbeContext_cpu_xmm3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm4) == PROBE_CPU_XMM4_OFFSET, ProbeContext_cpu_xmm4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_cpu_xmm5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline);
+
+#if CPU(X86_64)
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm8) == PROBE_CPU_XMM8_OFFSET, ProbeContext_cpu_xmm8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm9) == PROBE_CPU_XMM9_OFFSET, ProbeContext_cpu_xmm9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm10) == PROBE_CPU_XMM10_OFFSET, ProbeContext_cpu_xmm10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm11) == PROBE_CPU_XMM11_OFFSET, ProbeContext_cpu_xmm11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm12) == PROBE_CPU_XMM12_OFFSET, ProbeContext_cpu_xmm12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm13) == PROBE_CPU_XMM13_OFFSET, ProbeContext_cpu_xmm13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm14) == PROBE_CPU_XMM14_OFFSET, ProbeContext_cpu_xmm14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm15) == PROBE_CPU_XMM15_OFFSET, ProbeContext_cpu_xmm15_offset_matches_ctiMasmProbeTrampoline);
+#endif // CPU(X86_64)
+
+COMPILE_ASSERT(sizeof(MacroAssemblerX86Common::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
+
+#if CPU(X86)
+asm (
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    "pushfd" "\n"
+
+    // MacroAssemblerX86Common::probe() has already generated code to store some values.
+    // Together with the eflags pushed above, the top of stack now looks like
+    // this:
+    //     esp[0 * ptrSize]: eflags
+    //     esp[1 * ptrSize]: return address / saved eip
+    //     esp[2 * ptrSize]: probeFunction
+    //     esp[3 * ptrSize]: arg1
+    //     esp[4 * ptrSize]: arg2
+    //     esp[5 * ptrSize]: saved eax
+    //     esp[6 * ptrSize]: saved esp
+
+    "movl %esp, %eax" "\n"
+    "subl $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %esp" "\n"
+
+    // The X86_64 ABI specifies that the worst case stack alignment requirement
+    // is 32 bytes.
+    "andl $~0x1f, %esp" "\n"
+
+    "movl %ebp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%esp)" "\n"
+    "movl %esp, %ebp" "\n" // Save the ProbeContext*.
+
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp)" "\n"
+    "movl %edx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp)" "\n"
+    "movl %ebx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp)" "\n"
+    "movl %esi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp)" "\n"
+    "movl %edi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp)" "\n"
+
+    "movl 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp)" "\n"
+    "movl 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp)" "\n"
+    "movl 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
+    "movl 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%ebp)" "\n"
+    "movl 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%ebp)" "\n"
+    "movl 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp)" "\n"
+    "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
+
+    "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n"
+    "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n"
+    "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n"
+    "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n"
+    "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n"
+    "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n"
+    "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n"
+    "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n"
+
+    // Reserve stack space for the arg while maintaining the required stack
+    // pointer 32 byte alignment:
+    "subl $0x20, %esp" "\n"
+    "movl %ebp, 0(%esp)" "\n" // the ProbeContext* arg.
+
+    "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp), %edx" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp), %ebx" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n"
+
+    // There are 6 more registers left to restore:
+    //     eax, ecx, ebp, esp, eip, and eflags.
+    // We need to handle these last few restores carefully because:
+    //
+    // 1. We need to push the return address on the stack for ret to use.
+    //    That means we need to write to the stack.
+    // 2. The user probe function may have altered the restore value of esp to
+    //    point to the vicinity of one of the restore values for the remaining
+    //    registers left to be restored.
+    //    That means, for requirement 1, we may end up writing over some of the
+    //    restore values. We can check for this, and first copy the restore
+    //    values to a "safe area" on the stack before commencing with the action
+    //    for requirement 1.
+    // 3. For requirement 2, we need to ensure that the "safe area" is
+    //    protected from interrupt handlers overwriting it. Hence, the esp needs
+    //    to be adjusted to include the "safe area" before we start copying
+    //    the restore values.
+
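+    // In outline, the code below does the following: if the saved esp is high
+    // enough that writing the 5 restore slots below it cannot clobber the
+    // ProbeContext fields still to be read, jump straight to
+    // ctiMasmProbeTrampolineEnd. Otherwise, first copy the six remaining
+    // restore values (eax, ecx, ebp, esp, eip, eflags) into a "safe area"
+    // well below the saved esp (about 2x sizeof(ProbeContext) down) and
+    // repoint ebp at that copy before doing the final restores. The x86_64
+    // trampoline below uses the same scheme.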
+    "movl %ebp, %eax" "\n"
+    "addl $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %eax" "\n"
+    "cmpl %eax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
+    "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
+    // esp will be. This time we don't have to 32-byte align it because we're
+    // not using it to store any xmm regs.
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
+    "subl $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %eax" "\n"
+    "movl %eax, %esp" "\n"
+
+    "subl $" STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) ", %eax" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%eax)" "\n"
+    "movl %eax, %ebp" "\n"
+
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
+    "subl $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %eax" "\n"
+    // At this point, %esp should be < %eax.
+
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
+    "movl %ecx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+    "movl %eax, %esp" "\n"
+
+    "popfd" "\n"
+    "popl %eax" "\n"
+    "popl %ecx" "\n"
+    "popl %ebp" "\n"
+    "ret" "\n"
+);
+#endif // CPU(X86)
+
+#if CPU(X86_64)
+asm (
+    ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+    HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+    SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+    "pushfq" "\n"
+
+    // MacroAssemblerX86Common::probe() has already generated code to store some values.
+    // Together with the rflags pushed above, the top of stack now looks like
+    // this:
+    //     esp[0 * ptrSize]: rflags
+    //     esp[1 * ptrSize]: return address / saved rip
+    //     esp[2 * ptrSize]: probeFunction
+    //     esp[3 * ptrSize]: arg1
+    //     esp[4 * ptrSize]: arg2
+    //     esp[5 * ptrSize]: saved rax
+    //     esp[6 * ptrSize]: saved rsp
+
+    "movq %rsp, %rax" "\n"
+    "subq $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rsp" "\n"
+
+    // The X86_64 ABI specifies that the worst case stack alignment requirement
+    // is 32 bytes.
+    "andq $~0x1f, %rsp" "\n"
+
+    "movq %rbp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rsp)" "\n"
+    "movq %rsp, %rbp" "\n" // Save the ProbeContext*.
+
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp)" "\n"
+    "movq %rdx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp)" "\n"
+    "movq %rbx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp)" "\n"
+    "movq %rsi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp)" "\n"
+    "movq %rdi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp)" "\n"
+
+    "movq 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp)" "\n"
+    "movq 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp)" "\n"
+    "movq 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
+    "movq 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%rbp)" "\n"
+    "movq 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%rbp)" "\n"
+    "movq 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp)" "\n"
+    "movq 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
+
+    "movq %r8, " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp)" "\n"
+    "movq %r9, " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp)" "\n"
+    "movq %r10, " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp)" "\n"
+    "movq %r11, " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp)" "\n"
+    "movq %r12, " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp)" "\n"
+    "movq %r13, " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp)" "\n"
+    "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n"
+    "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n"
+
+    "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n"
+    "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n"
+    "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n"
+    "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n"
+    "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n"
+    "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n"
+    "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n"
+    "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n"
+    "movq %xmm8, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp)" "\n"
+    "movq %xmm9, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp)" "\n"
+    "movq %xmm10, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp)" "\n"
+    "movq %xmm11, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp)" "\n"
+    "movq %xmm12, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp)" "\n"
+    "movq %xmm13, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp)" "\n"
+    "movq %xmm14, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp)" "\n"
+    "movq %xmm15, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp)" "\n"
+
+    "movq %rbp, %rdi" "\n" // the ProbeContext* arg.
+    "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
+
+    // To enable probes to modify register state, we copy all registers
+    // out of the ProbeContext before returning.
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp), %rdx" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp), %rbx" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp), %rsi" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp), %rdi" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp), %r8" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp), %r9" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp), %r10" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp), %r11" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp), %r12" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp), %r13" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp), %xmm8" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp), %xmm9" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp), %xmm10" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp), %xmm11" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp), %xmm12" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp), %xmm13" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp), %xmm14" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp), %xmm15" "\n"
+
+    // There are 6 more registers left to restore:
+    //     rax, rcx, rbp, rsp, rip, and rflags.
+    // We need to handle these last few restores carefully because:
+    //
+    // 1. We need to push the return address on the stack for ret to use.
+    //    That means we need to write to the stack.
+    // 2. The user probe function may have altered the restore value of rsp to
+    //    point to the vicinity of one of the restore values for the remaining
+    //    registers left to be restored.
+    //    That means, for requirement 1, we may end up writing over some of the
+    //    restore values. We can check for this, and first copy the restore
+    //    values to a "safe area" on the stack before commencing with the action
+    //    for requirement 1.
+    // 3. For requirement 2, we need to ensure that the "safe area" is
+    //    protected from interrupt handlers overwriting it. Hence, the rsp needs
+    //    to be adjusted to include the "safe area" before we start copying
+    //    the restore values.
+
+    "movq %rbp, %rax" "\n"
+    "addq $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %rax" "\n"
+    "cmpq %rax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
+    "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+    // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
+    // rsp will be. This time we don't have to 32-byte align it because we're
+    // not using it to store any xmm regs.
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
+    "subq $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rax" "\n"
+    "movq %rax, %rsp" "\n"
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rax)" "\n"
+    "movq %rax, %rbp" "\n"
+
+    SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
+    "subq $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %rax" "\n"
+    // At this point, %rsp should be < %rax.
+
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
+    "movq %rcx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+    "movq %rax, %rsp" "\n"
+
+    "popfq" "\n"
+    "popq %rax" "\n"
+    "popq %rcx" "\n"
+    "popq %rbp" "\n"
+    "ret" "\n"
+);
+#endif // CPU(X86_64)
+
+#endif // COMPILER(GCC_OR_CLANG)
+
+// What code is emitted for the probe?
+// ==================================
+// We want to keep the size of the emitted probe invocation code as compact as
+// possible to minimize the perturbation to the JIT generated code. However,
+// we also need to preserve the CPU registers and set up the ProbeContext to be
+// passed to the user probe function.
+//
+// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
+// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
+// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
+// work, i.e. saving the CPUState (and setting up the ProbeContext), calling the
+// user probe function, and restoring the CPUState before returning to JIT
+// generated code.
+//
+// What registers need to be saved?
+// ===============================
+// The registers are saved for 2 reasons:
+// 1. To preserve their state in the JITted code. This means that all registers
+//    that are not callee-saved need to be saved. We also need to save the
+//    condition code registers because the probe can be inserted between a test
+//    and a branch.
+// 2. To allow the probe to inspect the values of the registers for debugging
+//    purposes. This means all registers need to be saved.
+//
+// In summary, save everything. But for reasons stated above, we should do the
+// minimum here and let ctiMasmProbeTrampoline do the heavy lifting to save the
+// full set.
+//
+// What values are in the saved registers?
+// ======================================
+// Conceptually, the saved registers should contain values as if the probe
+// is not present in the JIT generated code. Hence, they should contain values
+// that are expected at the start of the instruction immediately following the
+// probe.
+//
+// Specifically, the saved stack pointer register will point to the stack
+// position before we push the ProbeContext frame. The saved rip will point to
+// the address of the instruction immediately following the probe. 
+
+void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2)
+{
+    push(RegisterID::esp);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(arg2), RegisterID::eax);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(arg1), RegisterID::eax);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax);
+    push(RegisterID::eax);
+    move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax);
+    call(RegisterID::eax);
+}
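+
+// Example usage (a sketch; "dumpState" is a hypothetical client function and is
+// not defined in this file): a probe can be emitted in the middle of JIT
+// codegen to inspect, or even modify, the CPU state at that point.
+//
+//     static void dumpState(MacroAssemblerX86Common::ProbeContext* context)
+//     {
+//         // Inspect (or modify) context->cpu.eax, context->arg1, etc. here.
+//     }
+//     ...
+//     jit.probe(dumpState, nullptr, nullptr);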
+
+#endif // ENABLE(MASM_PROBE)
+
+#if CPU(X86) && !OS(MAC_OS_X)
+MacroAssemblerX86Common::SSE2CheckState MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_sse4_1CheckState = CPUIDCheckState::NotChecked;
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_avxCheckState = CPUIDCheckState::NotChecked;
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_lzcntCheckState = CPUIDCheckState::NotChecked;
+MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_bmi1CheckState = CPUIDCheckState::NotChecked;
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
diff --git a/assembler/MacroAssemblerX86Common.h b/assembler/MacroAssemblerX86Common.h
new file mode 100644
index 0000000..dcc778e
--- /dev/null
+++ b/assembler/MacroAssemblerX86Common.h
@@ -0,0 +1,3071 @@
+/*
+ * Copyright (C) 2008, 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER)
+
+#include "X86Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include 
+
+namespace JSC {
+
+class MacroAssemblerX86Common : public AbstractMacroAssembler {
+public:
+#if CPU(X86_64)
+    // Use this directly only if you're not generating code with it.
+    static const X86Registers::RegisterID s_scratchRegister = X86Registers::r11;
+
+    // Use this when generating code so that we get enforcement of the disallowing of scratch register
+    // usage.
+    X86Registers::RegisterID scratchRegister()
+    {
+        RELEASE_ASSERT(m_allowScratchRegister);
+        return s_scratchRegister;
+    }
+#endif
+    
+protected:
+    static const int DoubleConditionBitInvert = 0x10;
+    static const int DoubleConditionBitSpecial = 0x20;
+    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+public:
+    typedef X86Assembler::XMMRegisterID XMMRegisterID;
+    
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value >= -128 && value <= 127;
+    }
+
+    enum RelationalCondition {
+        Equal = X86Assembler::ConditionE,
+        NotEqual = X86Assembler::ConditionNE,
+        Above = X86Assembler::ConditionA,
+        AboveOrEqual = X86Assembler::ConditionAE,
+        Below = X86Assembler::ConditionB,
+        BelowOrEqual = X86Assembler::ConditionBE,
+        GreaterThan = X86Assembler::ConditionG,
+        GreaterThanOrEqual = X86Assembler::ConditionGE,
+        LessThan = X86Assembler::ConditionL,
+        LessThanOrEqual = X86Assembler::ConditionLE
+    };
+
+    enum ResultCondition {
+        Overflow = X86Assembler::ConditionO,
+        Signed = X86Assembler::ConditionS,
+        PositiveOrZero = X86Assembler::ConditionNS,
+        Zero = X86Assembler::ConditionE,
+        NonZero = X86Assembler::ConditionNE
+    };
+
+    // FIXME: it would be neat to rename this to FloatingPointCondition in every assembler.
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
+        DoubleNotEqual = X86Assembler::ConditionNE,
+        DoubleGreaterThan = X86Assembler::ConditionA,
+        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = X86Assembler::ConditionE,
+        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
+    };
+    COMPILE_ASSERT(
+        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
+
+    static const RegisterID stackPointerRegister = X86Registers::esp;
+    static const RegisterID framePointerRegister = X86Registers::ebp;
+    
+    static bool canBlind() { return true; }
+    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+
+    // Integer arithmetic operations:
+    //
+    // Operations are typically two-operand - operation(source, srcDst)
+    // For many operations the source may be a TrustedImm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+    // object).
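+    //
+    // Example usage (a sketch; the chosen registers and offsets are arbitrary,
+    // and "jit" stands for some MacroAssembler instance):
+    //
+    //     jit.add32(TrustedImm32(4), X86Registers::eax);             // eax += 4
+    //     jit.add32(TrustedImm32(1), Address(X86Registers::ebx, 8)); // increment *(int32_t*)(ebx + 8)
+    //     jit.and32(X86Registers::ecx, X86Registers::eax);           // eax &= ecx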
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.addl_rr(src, dest);
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        m_assembler.addl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void add32(TrustedImm32 imm, BaseIndex address)
+    {
+        m_assembler.addl_im(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void add8(TrustedImm32 imm, Address address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.addb_im(imm8.m_value, address.offset, address.base);
+    }
+
+    void add8(TrustedImm32 imm, BaseIndex address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.addb_im(imm8.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void add16(TrustedImm32 imm, Address address)
+    {
+        m_assembler.addw_im(imm.m_value, address.offset, address.base);
+    }
+
+    void add16(TrustedImm32 imm, BaseIndex address)
+    {
+        m_assembler.addw_im(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.inc_r(dest);
+        else
+            m_assembler.addl_ir(imm.m_value, dest);
+    }
+    
+    void add32(Address src, RegisterID dest)
+    {
+        m_assembler.addl_mr(src.offset, src.base, dest);
+    }
+
+    void add32(RegisterID src, Address dest)
+    {
+        m_assembler.addl_rm(src, dest.offset, dest.base);
+    }
+
+    void add32(RegisterID src, BaseIndex dest)
+    {
+        m_assembler.addl_rm(src, dest.offset, dest.base, dest.index, dest.scale);
+    }
+
+    void add8(RegisterID src, Address dest)
+    {
+        m_assembler.addb_rm(src, dest.offset, dest.base);
+    }
+
+    void add8(RegisterID src, BaseIndex dest)
+    {
+        m_assembler.addb_rm(src, dest.offset, dest.base, dest.index, dest.scale);
+    }
+
+    void add16(RegisterID src, Address dest)
+    {
+        m_assembler.addw_rm(src, dest.offset, dest.base);
+    }
+
+    void add16(RegisterID src, BaseIndex dest)
+    {
+        m_assembler.addw_rm(src, dest.offset, dest.base, dest.index, dest.scale);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (!imm.m_value) {
+            zeroExtend32ToPtr(src, dest);
+            return;
+        }
+
+        if (src == dest) {
+            add32(imm, dest);
+            return;
+        }
+
+        m_assembler.leal_mr(imm.m_value, src, dest);
+    }
+
+    void add32(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        x86Lea32(BaseIndex(a, b, TimesOne), dest);
+    }
+
+    void x86Lea32(BaseIndex index, RegisterID dest)
+    {
+        if (!index.scale && !index.offset) {
+            if (index.base == dest) {
+                add32(index.index, dest);
+                return;
+            }
+            if (index.index == dest) {
+                add32(index.base, dest);
+                return;
+            }
+        }
+        m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.andl_rr(src, dest);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.andl_ir(imm.m_value, dest);
+    }
+
+    void and32(RegisterID src, Address dest)
+    {
+        m_assembler.andl_rm(src, dest.offset, dest.base);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        m_assembler.andl_mr(src.offset, src.base, dest);
+    }
+
+    void and32(TrustedImm32 imm, Address address)
+    {
+        m_assembler.andl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            zeroExtend32ToPtr(op1, dest);
+        else if (op1 == dest)
+            and32(op2, dest);
+        else {
+            move32IfNeeded(op2, dest);
+            and32(op1, dest);
+        }
+    }
+
+    void and32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            and32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            and32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
+            and32(op1, dest);
+        }
+    }
+
+    void and32(RegisterID op1, Address op2, RegisterID dest)
+    {
+        and32(op2, op1, dest);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        and32(imm, dest);
+    }
+
+    void countLeadingZeros32(RegisterID src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcnt_rr(src, dst);
+            return;
+        }
+        m_assembler.bsr_rr(src, dst);
+        clz32AfterBsr(dst);
+    }
+
+    void countLeadingZeros32(Address src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcnt_mr(src.offset, src.base, dst);
+            return;
+        }
+        m_assembler.bsr_mr(src.offset, src.base, dst);
+        clz32AfterBsr(dst);
+    }
+
+    void countTrailingZeros32(RegisterID src, RegisterID dst)
+    {
+        if (supportsBMI1()) {
+            m_assembler.tzcnt_rr(src, dst);
+            return;
+        }
+        m_assembler.bsf_rr(src, dst);
+        ctzAfterBsf<32>(dst);
+    }
+
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        m_assembler.illegalInstruction();
+    }
+    
+    void lshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        if (shift_amount == X86Registers::ecx)
+            m_assembler.shll_CLr(dest);
+        else {
+            ASSERT(shift_amount != dest);
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+            swap(shift_amount, X86Registers::ecx);
+            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+            swap(shift_amount, X86Registers::ecx);
+        }
+    }
+
+    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+    {
+        ASSERT(shift_amount != dest);
+
+        move32IfNeeded(src, dest);
+        lshift32(shift_amount, dest);
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shll_i8r(imm.m_value, dest);
+    }
+    
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        lshift32(imm, dest);
+    }
+    
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.imull_rr(src, dest);
+    }
+
+    void mul32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src2 == dest) {
+            m_assembler.imull_rr(src1, dest);
+            return;
+        }
+        move32IfNeeded(src1, dest);
+        m_assembler.imull_rr(src2, dest);
+    }
+
+    void mul32(Address src, RegisterID dest)
+    {
+        m_assembler.imull_mr(src.offset, src.base, dest);
+    }
+
+    void mul32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            mul32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            mul32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
+            mul32(op1, dest);
+        }
+    }
+
+    void mul32(RegisterID src1, Address src2, RegisterID dest)
+    {
+        mul32(src2, src1, dest);
+    }
+    
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.imull_i32r(src, imm.m_value, dest);
+    }
+
+    void x86ConvertToDoubleWord32()
+    {
+        m_assembler.cdq();
+    }
+
+    void x86ConvertToDoubleWord32(RegisterID eax, RegisterID edx)
+    {
+        ASSERT_UNUSED(eax, eax == X86Registers::eax);
+        ASSERT_UNUSED(edx, edx == X86Registers::edx);
+        x86ConvertToDoubleWord32();
+    }
+
+    void x86Div32(RegisterID denominator)
+    {
+        m_assembler.idivl_r(denominator);
+    }
+
+    void x86Div32(RegisterID eax, RegisterID edx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(eax, eax == X86Registers::eax);
+        ASSERT_UNUSED(edx, edx == X86Registers::edx);
+        x86Div32(denominator);
+    }
+
+    void x86UDiv32(RegisterID denominator)
+    {
+        m_assembler.divl_r(denominator);
+    }
+
+    void x86UDiv32(RegisterID eax, RegisterID edx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(eax, eax == X86Registers::eax);
+        ASSERT_UNUSED(edx, edx == X86Registers::edx);
+        x86UDiv32(denominator);
+    }
+
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.negl_r(srcDest);
+    }
+
+    void neg32(Address srcDest)
+    {
+        m_assembler.negl_m(srcDest.offset, srcDest.base);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orl_rr(src, dest);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.orl_ir(imm.m_value, dest);
+    }
+
+    void or32(RegisterID src, Address dest)
+    {
+        m_assembler.orl_rm(src, dest.offset, dest.base);
+    }
+
+    void or32(Address src, RegisterID dest)
+    {
+        m_assembler.orl_mr(src.offset, src.base, dest);
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        m_assembler.orl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            zeroExtend32ToPtr(op1, dest);
+        else if (op1 == dest)
+            or32(op2, dest);
+        else {
+            move32IfNeeded(op2, dest);
+            or32(op1, dest);
+        }
+    }
+
+    void or32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            or32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            or32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
+            or32(op1, dest);
+        }
+    }
+
+    void or32(RegisterID op1, Address op2, RegisterID dest)
+    {
+        or32(op2, op1, dest);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        or32(imm, dest);
+    }
+
+    void rshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        if (shift_amount == X86Registers::ecx)
+            m_assembler.sarl_CLr(dest);
+        else {
+            ASSERT(shift_amount != dest);
+            
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+            swap(shift_amount, X86Registers::ecx);
+            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+            swap(shift_amount, X86Registers::ecx);
+        }
+    }
+
+    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+    {
+        ASSERT(shift_amount != dest);
+
+        move32IfNeeded(src, dest);
+        rshift32(shift_amount, dest);
+    }
+
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.sarl_i8r(imm.m_value, dest);
+    }
+    
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        rshift32(imm, dest);
+    }
+    
+    void urshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        if (shift_amount == X86Registers::ecx)
+            m_assembler.shrl_CLr(dest);
+        else {
+            ASSERT(shift_amount != dest);
+        
+            // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+            swap(shift_amount, X86Registers::ecx);
+            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+            swap(shift_amount, X86Registers::ecx);
+        }
+    }
+
+    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+    {
+        ASSERT(shift_amount != dest);
+
+        move32IfNeeded(src, dest);
+        urshift32(shift_amount, dest);
+    }
+
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shrl_i8r(imm.m_value, dest);
+    }
+    
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        urshift32(imm, dest);
+    }
+
+    void rotateRight32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.rorl_i8r(imm.m_value, dest);
+    }
+
+    void rotateRight32(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.rorl_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.rorl_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void rotateLeft32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.roll_i8r(imm.m_value, dest);
+    }
+
+    void rotateLeft32(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.roll_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.roll_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.subl_rr(src, dest);
+    }
+
+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        if (dest == right) {
+            neg32(dest);
+            add32(left, dest);
+            return;
+        }
+        move(left, dest);
+        sub32(right, dest);
+    }
+
+    void sub32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.dec_r(dest);
+        else
+            m_assembler.subl_ir(imm.m_value, dest);
+    }
+    
+    void sub32(TrustedImm32 imm, Address address)
+    {
+        m_assembler.subl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        m_assembler.subl_mr(src.offset, src.base, dest);
+    }
+
+    void sub32(RegisterID src, Address dest)
+    {
+        m_assembler.subl_rm(src, dest.offset, dest.base);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xorl_rr(src, dest);
+    }
+
+    void xor32(TrustedImm32 imm, Address dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.notl_m(dest.offset, dest.base);
+        else
+            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == -1)
+            m_assembler.notl_r(dest);
+        else
+            m_assembler.xorl_ir(imm.m_value, dest);
+    }
+
+    void xor32(RegisterID src, Address dest)
+    {
+        m_assembler.xorl_rm(src, dest.offset, dest.base);
+    }
+
+    void xor32(Address src, RegisterID dest)
+    {
+        m_assembler.xorl_mr(src.offset, src.base, dest);
+    }
+    
+    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(TrustedImm32(0), dest);
+        else if (op1 == dest)
+            xor32(op2, dest);
+        else {
+            move32IfNeeded(op2, dest);
+            xor32(op1, dest);
+        }
+    }
+
+    void xor32(Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            xor32(op1, dest);
+        else if (op1.base == dest) {
+            load32(op1, dest);
+            xor32(op2, dest);
+        } else {
+            zeroExtend32ToPtr(op2, dest);
+            xor32(op1, dest);
+        }
+    }
+
+    void xor32(RegisterID op1, Address op2, RegisterID dest)
+    {
+        xor32(op2, op1, dest);
+    }
+
+    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        xor32(imm, dest);
+    }
+
+    void not32(RegisterID srcDest)
+    {
+        m_assembler.notl_r(srcDest);
+    }
+
+    void not32(Address dest)
+    {
+        m_assembler.notl_m(dest.offset, dest.base);
+    }
+
+    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.sqrtsd_rr(src, dst);
+    }
+
+    void sqrtDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.sqrtsd_mr(src.offset, src.base, dst);
+    }
+
+    void sqrtFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.sqrtss_rr(src, dst);
+    }
+
+    void sqrtFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.sqrtss_mr(src.offset, src.base, dst);
+    }
+
+    void absDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(src != dst);
+        static const double negativeZeroConstant = -0.0;
+        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
+        m_assembler.andnpd_rr(src, dst);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(src != dst);
+        static const double negativeZeroConstant = -0.0;
+        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
+        m_assembler.xorpd_rr(src, dst);
+    }
+
+    void ceilDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void ceilDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void ceilFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void ceilFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti);
+    }
+
+    void floorDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void floorDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void floorFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void floorFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti);
+    }
+
+    void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven);
+    }
+
+    void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven);
+    }
+
+    void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    void roundTowardZeroDouble(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    void roundTowardZeroFloat(Address src, FPRegisterID dst)
+    {
+        m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero);
+    }
+
+    // Memory access operations:
+    //
+    // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address).  The source for a store may be a TrustedImm32.  Address
+    // operand objects to loads and stores will be implicitly constructed if a
+    // register is passed.
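+    //
+    // Example usage (a sketch; registers and offsets are arbitrary, and "jit"
+    // stands for some MacroAssembler instance):
+    //
+    //     jit.load32(Address(X86Registers::ebx, 16), X86Registers::eax);   // eax = *(int32_t*)(ebx + 16)
+    //     jit.store32(TrustedImm32(0), Address(X86Registers::ebx, 16));    // *(int32_t*)(ebx + 16) = 0
+    //     jit.store8(X86Registers::eax, Address(X86Registers::ebx, 20));   // low byte of eax -> (ebx + 20)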
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movl_mr(address.offset, address.base, dest);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        load16(address, dest);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+        return DataLabel32(this);
+    }
+    
+    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+        return DataLabelCompact(this);
+    }
+    
+    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+    {
+        ASSERT(isCompactPtrAlignedAddressOffset(value));
+        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
+    }
+    
+    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+        return DataLabelCompact(this);
+    }
+
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movzbl_mr(address.offset, address.base, dest);
+    }
+    
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movsbl_mr(address.offset, address.base, dest);
+    }
+
+    void zeroExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movzbl_rr(src, dest);
+    }
+    
+    void signExtend8To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movsbl_rr(src, dest);
+    }
+    
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+    
+    void load16(Address address, RegisterID dest)
+    {
+        m_assembler.movzwl_mr(address.offset, address.base, dest);
+    }
+
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+    
+    void load16SignedExtendTo32(Address address, RegisterID dest)
+    {
+        m_assembler.movswl_mr(address.offset, address.base, dest);
+    }
+
+    void zeroExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movzwl_rr(src, dest);
+    }
+    
+    void signExtend16To32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movswl_rr(src, dest);
+    }
+    
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        padBeforePatch();
+        m_assembler.movl_rm_disp32(src, address.offset, address.base);
+        return DataLabel32(this);
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.movl_rm(src, address.offset, address.base);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store32(TrustedImm32 imm, ImplicitAddress address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+    }
+    
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void storeZero32(ImplicitAddress address)
+    {
+        store32(TrustedImm32(0), address);
+    }
+
+    void storeZero32(BaseIndex address)
+    {
+        store32(TrustedImm32(0), address);
+    }
+
+    void store8(TrustedImm32 imm, Address address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.movb_i8m(imm8.m_value, address.offset, address.base);
+    }
+
+    void store8(TrustedImm32 imm, BaseIndex address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        m_assembler.movb_i8m(imm8.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
+    {
+        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+            return X86Registers::eax;
+
+        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+            return X86Registers::ebx;
+
+        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+        return X86Registers::ecx;
+    }
+
+    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
+    {
+        if (address.base != X86Registers::eax)
+            return X86Registers::eax;
+
+        ASSERT(address.base != X86Registers::edx);
+        return X86Registers::edx;
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp = getUnusedRegister(address);
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+    
+    void store8(RegisterID src, Address address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp = getUnusedRegister(address);
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movb_rm(temp, address.offset, address.base);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movb_rm(src, address.offset, address.base);
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp = getUnusedRegister(address);
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store16(RegisterID src, Address address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp = getUnusedRegister(address);
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movw_rm(temp, address.offset, address.base);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movw_rm(src, address.offset, address.base);
+    }
+
+
+    // Floating-point operations:
+    //
+    // Presently only supports SSE, not x87 floating point.
+
+    void moveDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        if (src != dest)
+            m_assembler.movaps_rr(src, dest);
+    }
+
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+    {
+#if CPU(X86)
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address.m_value, dest);
+#else
+        move(address, scratchRegister());
+        loadDouble(scratchRegister(), dest);
+#endif
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address.offset, address.base, dest);
+    }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void loadFloat(ImplicitAddress address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_mr(address.offset, address.base, dest);
+    }
+
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_rm(src, address.offset, address.base);
+    }
+    
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void storeFloat(FPRegisterID src, ImplicitAddress address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_rm(src, address.offset, address.base);
+    }
+
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+    
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsd2ss_rr(src, dst);
+    }
+
+    void convertDoubleToFloat(Address address, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsd2ss_mr(address.offset, address.base, dst);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtss2sd_rr(src, dst);
+    }
+
+    void convertFloatToDouble(Address address, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtss2sd_mr(address.offset, address.base, dst);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        addDouble(src, dest, dest);
+    }
+
+    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddsd_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.addsd_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.addsd_rr(op1, dest);
+            }
+        }
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        addDouble(src, dest, dest);
+    }
+
+    void addDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddsd_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addsd_mr(op1.offset, op1.base, dest);
+                return;
+            }
+
+            loadDouble(op1, dest);
+            addDouble(op2, dest);
+        }
+    }
+
+    void addDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        addDouble(op2, op1, dest);
+    }
+
+    void addDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadDouble(op1, dest);
+            addDouble(op2, dest);
+        }
+    }
+
+    void addFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        addFloat(src, dest, dest);
+    }
+
+    void addFloat(Address src, FPRegisterID dest)
+    {
+        addFloat(src, dest, dest);
+    }
+
+    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddss_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.addss_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.addss_rr(op1, dest);
+            }
+        }
+    }
+
+    void addFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddss_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addss_mr(op1.offset, op1.base, dest);
+                return;
+            }
+
+            loadFloat(op1, dest);
+            addFloat(op2, dest);
+        }
+    }
+
+    void addFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        addFloat(op2, op1, dest);
+    }
+
+    void addFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vaddss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.addss_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadFloat(op1, dest);
+            addFloat(op2, dest);
+        }
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divsd_rr(src, dest);
+    }
+
+    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        // B := A / B is invalid.
+        ASSERT(op1 == dest || op2 != dest);
+
+        moveDouble(op1, dest);
+        divDouble(op2, dest);
+    }
+
+    void divDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divsd_mr(src.offset, src.base, dest);
+    }
+
+    void divFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divss_rr(src, dest);
+    }
+
+    void divFloat(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divss_mr(src.offset, src.base, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        subDouble(dest, src, dest);
+    }
+
+    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubsd_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+
+            // B := A - B is invalid.
+            ASSERT(op1 == dest || op2 != dest);
+            moveDouble(op1, dest);
+            m_assembler.subsd_rr(op2, dest);
+        }
+    }
+
+    void subDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubsd_mr(op1, op2.offset, op2.base, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subsd_mr(op2.offset, op2.base, dest);
+        }
+    }
+
+    void subDouble(FPRegisterID op1, BaseIndex op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubsd_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subsd_mr(op2.offset, op2.base, op2.index, op2.scale, dest);
+        }
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        subDouble(dest, src, dest);
+    }
+
+    void subFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        subFloat(dest, src, dest);
+    }
+
+    void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubss_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            // B := A - B is invalid.
+            ASSERT(op1 == dest || op2 != dest);
+            moveDouble(op1, dest);
+            m_assembler.subss_rr(op2, dest);
+        }
+    }
+
+    void subFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubss_mr(op1, op2.offset, op2.base, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subss_mr(op2.offset, op2.base, dest);
+        }
+    }
+
+    void subFloat(FPRegisterID op1, BaseIndex op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vsubss_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest);
+        else {
+            moveDouble(op1, dest);
+            m_assembler.subss_mr(op2.offset, op2.base, op2.index, op2.scale, dest);
+        }
+    }
+
+    void subFloat(Address src, FPRegisterID dest)
+    {
+        subFloat(dest, src, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        mulDouble(src, dest, dest);
+    }
+
+    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulsd_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.mulsd_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.mulsd_rr(op1, dest);
+            }
+        }
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        mulDouble(src, dest, dest);
+    }
+
+    void mulDouble(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulsd_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulsd_mr(op1.offset, op1.base, dest);
+                return;
+            }
+            loadDouble(op1, dest);
+            mulDouble(op2, dest);
+        }
+    }
+
+    void mulDouble(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        return mulDouble(op2, op1, dest);
+    }
+
+    void mulDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadDouble(op1, dest);
+            mulDouble(op2, dest);
+        }
+    }
+
+    void mulFloat(FPRegisterID src, FPRegisterID dest)
+    {
+        mulFloat(src, dest, dest);
+    }
+
+    void mulFloat(Address src, FPRegisterID dest)
+    {
+        mulFloat(src, dest, dest);
+    }
+
+    void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulss_rr(op1, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op1 == dest)
+                m_assembler.mulss_rr(op2, dest);
+            else {
+                moveDouble(op2, dest);
+                m_assembler.mulss_rr(op1, dest);
+            }
+        }
+    }
+
+    void mulFloat(Address op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulss_mr(op1.offset, op1.base, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulss_mr(op1.offset, op1.base, dest);
+                return;
+            }
+            loadFloat(op1, dest);
+            mulFloat(op2, dest);
+        }
+    }
+
+    void mulFloat(FPRegisterID op1, Address op2, FPRegisterID dest)
+    {
+        mulFloat(op2, op1, dest);
+    }
+
+    void mulFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest)
+    {
+        if (supportsAVX())
+            m_assembler.vmulss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest);
+        else {
+            ASSERT(isSSE2Present());
+            if (op2 == dest) {
+                m_assembler.mulss_mr(op1.offset, op1.base, op1.index, op1.scale, dest);
+                return;
+            }
+            loadFloat(op1, dest);
+            mulFloat(op2, dest);
+        }
+    }
+
+    void andDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        // ANDPS is defined on 128bits and is shorter than ANDPD.
+        m_assembler.andps_rr(src, dst);
+    }
+
+    void andDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            andDouble(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            andDouble(src1, dst);
+        }
+    }
+
+    void andFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.andps_rr(src, dst);
+    }
+
+    void andFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            andFloat(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            andFloat(src1, dst);
+        }
+    }
+
+    void xorDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.xorps_rr(src, dst);
+    }
+
+    void xorDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            xorDouble(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            xorDouble(src1, dst);
+        }
+    }
+
+    void xorFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        m_assembler.xorps_rr(src, dst);
+    }
+
+    void xorFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst)
+    {
+        if (src1 == dst)
+            xorFloat(src2, dst);
+        else {
+            moveDouble(src2, dst);
+            xorFloat(src1, dst);
+        }
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2sd_rr(src, dest);
+    }
+
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+    }
+
+    void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2ss_rr(src, dest);
+    }
+
+    void convertInt32ToFloat(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2ss_mr(src.offset, src.base, dest);
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+        return jumpAfterFloatingPointCompare(cond, left, right);
+    }
+
+    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomiss_rr(left, right);
+        else
+            m_assembler.ucomiss_rr(right, left);
+        return jumpAfterFloatingPointCompare(cond, left, right);
+    }
+
+    // Truncates 'src' to an integer, and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, INT_MIN).
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
+    }
+
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+    }
+
+    void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttss2si_rr(src, dest);
+    }
+
+    // Converts 'src' to an integer, and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+#if CPU(X86_64)
+        if (negZeroCheck) {
+            Jump valueIsNonZero = branchTest32(NonZero, dest);
+            m_assembler.movmskpd_rr(src, scratchRegister());
+            failureCases.append(branchTest32(NonZero, scratchRegister(), TrustedImm32(1)));
+            valueIsNonZero.link(this);
+        }
+#else
+        if (negZeroCheck)
+            failureCases.append(branchTest32(Zero, dest));
+#endif
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        convertInt32ToDouble(dest, fpTemp);
+        m_assembler.ucomisd_rr(fpTemp, src);
+        failureCases.append(m_assembler.jp());
+        failureCases.append(m_assembler.jne());
+    }
+
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        m_assembler.xorps_rr(reg, reg);
+    }
+
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.xorpd_rr(scratch, scratch);
+        return branchDouble(DoubleNotEqual, reg, scratch);
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.xorpd_rr(scratch, scratch);
+        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+    }
+
+    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.psllq_i8r(imm.m_value, reg);
+    }
+
+    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.psrlq_i8r(imm.m_value, reg);
+    }
+
+    void orPacked(XMMRegisterID src, XMMRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.por_rr(src, dst);
+    }
+
+    void move32ToFloat(RegisterID src, XMMRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movd_rr(src, dst);
+    }
+
+    void moveFloatTo32(XMMRegisterID src, RegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movd_rr(src, dst);
+    }
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data.  Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack.  Peek and poke operations read or write
+    // values on the stack, without moving the current stack position.
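+    //
+    // For example (illustrative only; 'reg' is a placeholder register name):
+    //     push(reg);                 // grow the stack by one word and store reg
+    //     push(TrustedImm32(0));     // push an immediate word
+    //     pop(reg);                  // reload reg and shrink the stack by one word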
+    
+    void pop(RegisterID dest)
+    {
+        m_assembler.pop_r(dest);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.push_r(src);
+    }
+
+    void push(Address address)
+    {
+        m_assembler.push_m(address.offset, address.base);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        m_assembler.push_i32(imm.m_value);
+    }
+
+
+    // Register move operations:
+    //
+    // Move values in registers.
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
+        // it may be useful to have a separate version that sign extends the value?
+        if (!imm.m_value)
+            m_assembler.xorl_rr(dest, dest);
+        else
+            m_assembler.movl_i32r(imm.m_value, dest);
+    }
+
+#if CPU(X86_64)
+    void move(RegisterID src, RegisterID dest)
+    {
+        // Note: on 64-bit this is a full register move; perhaps it would be
+        // useful to have separate move32 & movePtr, with move32 zero extending?
+        if (src != dest)
+            m_assembler.movq_rr(src, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorq_rr(dest, dest);
+        else
+            m_assembler.movq_i64r(imm.asIntptr(), dest);
+    }
+
+    void move(TrustedImm64 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorq_rr(dest, dest);
+        else
+            m_assembler.movq_i64r(imm.m_value, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        RegisterID src;
+        if (elseCase == dest)
+            src = thenCase;
+        else {
+            cond = invert(cond);
+            src = elseCase;
+        }
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomiss_rr(left, right);
+        else
+            m_assembler.ucomiss_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+
+    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        RegisterID src;
+        if (elseCase == dest)
+            src = thenCase;
+        else {
+            cond = invert(cond);
+            src = elseCase;
+        }
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomiss_rr(left, right);
+        else
+            m_assembler.ucomiss_rr(right, left);
+        moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest);
+    }
+    
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        if (reg1 != reg2)
+            m_assembler.xchgq_rr(reg1, reg2);
+    }
+
+    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorq_rr(dest, dest);
+        else
+            m_assembler.mov_i32r(imm.m_value, dest);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movsxd_rr(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movl_rr(src, dest);
+    }
+
+    void zeroExtend32ToPtr(TrustedImm32 src, RegisterID dest)
+    {
+        m_assembler.movl_i32r(src.m_value, dest);
+    }
+#else
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.movl_rr(src, dest);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        if (!imm.m_value)
+            m_assembler.xorl_rr(dest, dest);
+        else
+            m_assembler.movl_i32r(imm.asIntptr(), dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.cmovnpl_rr(src, dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.cmovel_rr(src, dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.cmovpl_rr(src, dest);
+                return;
+            }
+
+            m_assembler.cmovpl_rr(src, dest);
+            m_assembler.cmovnel_rr(src, dest);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        if (reg1 != reg2)
+            m_assembler.xchgl_rr(reg1, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+#endif
+
+    void swap32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xchgl_rr(src, dest);
+    }
+
+    void swap32(RegisterID src, Address dest)
+    {
+        m_assembler.xchgl_rm(src, dest.offset, dest.base);
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmpl_rr(right, left);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmpl_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpl_ir(right.m_value, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.testl_rr(testReg, mask);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        m_assembler.testl_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
+    {
+        test32(testReg, mask);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        test32(testReg, mask);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    template<typename LeftType, typename RightType>
+    void moveDoubleConditionally32(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (thenCase != dest && elseCase != dest) {
+            moveDouble(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest) {
+            Jump falseCase = branch32(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else {
+            Jump trueCase = branch32(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        }
+    }
+
+    template<typename TestType, typename MaskType>
+    void moveDoubleConditionallyTest32(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (elseCase == dest && isInvertible(cond)) {
+            Jump falseCase = branchTest32(invert(cond), test, mask);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+            return;
+        }
+
+        if (thenCase == dest) {
+            Jump trueCase = branchTest32(cond, test, mask);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+            return;
+        }
+
+        Jump trueCase = branchTest32(cond, test, mask);
+        moveDouble(elseCase, dest);
+        Jump falseCase = jump();
+        trueCase.link(this);
+        moveDouble(thenCase, dest);
+        falseCase.link(this);
+    }
+
+    void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (elseCase == dest) {
+            Jump falseCase = branchDouble(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchDouble(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchDouble(cond, left, right);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
+    }
+
+    void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        if (elseCase == dest) {
+            Jump falseCase = branchFloat(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchFloat(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchFloat(cond, left, right);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
+    }
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
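+    //
+    // A typical (illustrative) pattern: emit the branch first, then bind it to
+    // its target later via link(). 'valueReg' below is a placeholder name.
+    //     Jump isZero = branchTest32(Zero, valueReg);
+    //     ... // fast path, executed when valueReg is non-zero
+    //     isZero.link(this); // code emitted after this point is the branch target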
+
+public:
+    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmpl_rr(right, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest32(*resultCondition, left, left);
+        }
+
+        m_assembler.cmpl_ir(right.m_value, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        m_assembler.cmpl_mr(right.offset, right.base, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        m_assembler.cmpl_rm(right, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        m_assembler.testl_rr(reg, mask);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.testl_rr(reg, reg);
+        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
+            if (mask.m_value == 0xff)
+                m_assembler.testb_rr(reg, reg);
+            else
+                m_assembler.testb_i8r(mask.m_value, reg);
+        } else
+            m_assembler.testl_i32r(mask.m_value, reg);
+    }
+
+    Jump branch(ResultCondition cond)
+    {
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        test32(reg, mask);
+        return branch(cond);
+    }
+
+    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        generateTest32(address, mask);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
+            m_assembler.cmpb_im(0, address.offset, address.base);
+        else
+            m_assembler.testb_im(mask8.m_value, address.offset, address.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
+            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
+        else
+            m_assembler.testb_im(mask8.m_value, address.offset, address.base, address.index, address.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.offset, left.base, left.index, left.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump jump()
+    {
+        return Jump(m_assembler.jmp());
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.jmp_r(target);
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(Address address)
+    {
+        m_assembler.jmp_m(address.offset, address.base);
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(BaseIndex address)
+    {
+        m_assembler.jmp_m(address.offset, address.base, address.index, address.scale);
+    }
+
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation.  The operation
+    // is performed as normal, storing the result.
+    //
+    // * jz operations branch if the result is zero.
+    // * jo operations branch if the (signed) arithmetic
+    //   operation caused an overflow to occur.
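+    //
+    // For example (an illustrative sketch; 'src' and 'dest' are placeholder
+    // register names), a checked addition that bails out on signed overflow:
+    //     Jump overflowed = branchAdd32(Overflow, src, dest);
+    //     // fall through: dest now holds src + dest
+    //     // later, bind overflowed to an out-of-line slow path: overflowed.link(this);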
+    
+    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        add32(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
+    {
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
+    {
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchAdd32(cond, src2, dest);
+        move32IfNeeded(src2, dest);
+        return branchAdd32(cond, src1, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            return branchAdd32(cond, op1, dest);
+        if (op1.base == dest) {
+            load32(op1, dest);
+            return branchAdd32(cond, op2, dest);
+        }
+        zeroExtend32ToPtr(op2, dest);
+        return branchAdd32(cond, op1, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
+    {
+        return branchAdd32(cond, src2, src1, dest);
+    }
+
+    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        move32IfNeeded(src, dest);
+        return branchAdd32(cond, imm, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        mul32(src, dest);
+        if (cond != Overflow)
+            m_assembler.testl_rr(dest, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        mul32(src, dest);
+        if (cond != Overflow)
+            m_assembler.testl_rr(dest, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        mul32(imm, src, dest);
+        if (cond != Overflow)
+            m_assembler.testl_rr(dest, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchMul32(cond, src2, dest);
+        move32IfNeeded(src2, dest);
+        return branchMul32(cond, src1, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        sub32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        sub32(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
+    {
+        sub32(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
+    {
+        sub32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
+    {
+        sub32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        // B := A - B is invalid.
+        ASSERT(src1 == dest || src2 != dest);
+
+        move32IfNeeded(src1, dest);
+        return branchSub32(cond, src2, dest);
+    }
+
+    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+    {
+        move32IfNeeded(src1, dest);
+        return branchSub32(cond, src2, dest);
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+    {
+        neg32(srcDest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        or32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+
+    // Miscellaneous operations:
+
+    void breakpoint()
+    {
+        m_assembler.int3();
+    }
+
+    Call nearTailCall()
+    {
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
+    Call nearCall()
+    {
+        return Call(m_assembler.call(), Call::LinkableNear);
+    }
+
+    Call call(RegisterID target)
+    {
+        return Call(m_assembler.call(target), Call::None);
+    }
+
+    void call(Address address)
+    {
+        m_assembler.call_m(address.offset, address.base);
+    }
+
+    void ret()
+    {
+        m_assembler.ret();
+    }
+
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        TrustedImm32 right8(static_cast<int8_t>(right.m_value));
+        m_assembler.cmpb_im(right8.m_value, left.offset, left.base);
+        set32(x86Condition(cond), dest);
+    }
+    
+    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpl_rr(right, left);
+        set32(x86Condition(cond), dest);
+    }
+
+    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test32(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpl_ir(right.m_value, left);
+        set32(x86Condition(cond), dest);
+    }
+
+    // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+    // dest-src, operations always have a dest? ... possibly not true, considering
+    // asm ops like test, or pseudo ops like pop().
+
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        if (mask8.m_value == -1)
+            m_assembler.cmpb_im(0, address.offset, address.base);
+        else
+            m_assembler.testb_im(mask8.m_value, address.offset, address.base);
+        set32(x86Condition(cond), dest);
+    }
+
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        generateTest32(address, mask);
+        set32(x86Condition(cond), dest);
+    }
+
+    void test32(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.testl_rr(reg, mask);
+        set32(x86Condition(cond), dest);
+    }
+
+    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        test32(reg, mask);
+        set32(x86Condition(cond), dest);
+    }
+
+    void setCarry(RegisterID dest)
+    {
+        set32(X86Assembler::ConditionC, dest);
+    }
+
+    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+    static RelationalCondition invert(RelationalCondition cond)
+    {
+        return static_cast<RelationalCondition>(cond ^ 1);
+    }
+
+    static DoubleCondition invert(DoubleCondition cond)
+    {
+        switch (cond) {
+        case DoubleEqual:
+            return DoubleNotEqualOrUnordered;
+        case DoubleNotEqual:
+            return DoubleEqualOrUnordered;
+        case DoubleGreaterThan:
+            return DoubleLessThanOrEqualOrUnordered;
+        case DoubleGreaterThanOrEqual:
+            return DoubleLessThanOrUnordered;
+        case DoubleLessThan:
+            return DoubleGreaterThanOrEqualOrUnordered;
+        case DoubleLessThanOrEqual:
+            return DoubleGreaterThanOrUnordered;
+        case DoubleEqualOrUnordered:
+            return DoubleNotEqual;
+        case DoubleNotEqualOrUnordered:
+            return DoubleEqual;
+        case DoubleGreaterThanOrUnordered:
+            return DoubleLessThanOrEqual;
+        case DoubleGreaterThanOrEqualOrUnordered:
+            return DoubleLessThan;
+        case DoubleLessThanOrUnordered:
+            return DoubleGreaterThanOrEqual;
+        case DoubleLessThanOrEqualOrUnordered:
+            return DoubleGreaterThan;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return DoubleEqual; // make compiler happy
+    }
+
+    static bool isInvertible(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+        case NonZero:
+        case Signed:
+        case PositiveOrZero:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static ResultCondition invert(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+            return NonZero;
+        case NonZero:
+            return Zero;
+        case Signed:
+            return PositiveOrZero;
+        case PositiveOrZero:
+            return Signed;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return Zero; // Make compiler happy for release builds.
+        }
+    }
+
+    static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
+    {
+        switch (cond) {
+        case Equal:
+            return Zero;
+        case NotEqual:
+            return NonZero;
+        case LessThan:
+            return Signed;
+        case GreaterThanOrEqual:
+            return PositiveOrZero;
+        default:
+            return std::nullopt;
+        }
+    }
+
+    void nop()
+    {
+        m_assembler.nop();
+    }
+    
+    // We take memoryFence to mean acqrel. This has acqrel semantics on x86.
+    void memoryFence()
+    {
+        // lock; orl $0, (%rsp)
+        m_assembler.lock();
+        m_assembler.orl_im(0, 0, X86Registers::esp);
+    }
+
+    // We take this to mean that it prevents motion of normal stores. So, it's a no-op on x86.
+    void storeFence()
+    {
+    }
+
+    // We take this to mean that it prevents motion of normal loads. So, it's a no-op on x86.
+    void loadFence()
+    {
+    }
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return X86Assembler::maxJumpReplacementSize();
+    }
+
+    static ptrdiff_t patchableJumpSize()
+    {
+        return X86Assembler::patchableJumpSize();
+    }
+
+    static bool supportsFloatingPointRounding()
+    {
+        if (s_sse4_1CheckState == CPUIDCheckState::NotChecked)
+            updateEax1EcxFlags();
+        return s_sse4_1CheckState == CPUIDCheckState::Set;
+    }
+
+    static bool supportsAVX()
+    {
+        // AVX still causes mysterious regressions and those regressions can be massive.
+        return false;
+    }
+
+    static void updateEax1EcxFlags()
+    {
+        int flags = 0;
+#if COMPILER(MSVC)
+        int cpuInfo[4];
+        __cpuid(cpuInfo, 0x1);
+        flags = cpuInfo[2];
+#elif COMPILER(GCC_OR_CLANG)
+#if CPU(X86_64)
+        asm (
+            "movl $0x1, %%eax;"
+            "cpuid;"
+            "movl %%ecx, %0;"
+            : "=g" (flags)
+            :
+            : "%eax", "%ebx", "%ecx", "%edx"
+            );
+#else
+        asm (
+            "movl $0x1, %%eax;"
+            "pushl %%ebx;"
+            "cpuid;"
+            "popl %%ebx;"
+            "movl %%ecx, %0;"
+            : "=g" (flags)
+            :
+            : "%eax", "%ecx", "%edx"
+            );
+#endif
+#endif // COMPILER(GCC_OR_CLANG)
+        s_sse4_1CheckState = (flags & (1 << 19)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+        s_avxCheckState = (flags & (1 << 28)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+    }
+
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
+protected:
+    X86Assembler::Condition x86Condition(RelationalCondition cond)
+    {
+        return static_cast<X86Assembler::Condition>(cond);
+    }
+
+    X86Assembler::Condition x86Condition(ResultCondition cond)
+    {
+        return static_cast<X86Assembler::Condition>(cond);
+    }
+
+    void set32(X86Assembler::Condition cond, RegisterID dest)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only set the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (dest >= 4) {
+            m_assembler.xchgl_rr(dest, X86Registers::eax);
+            m_assembler.setCC_r(cond, X86Registers::eax);
+            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
+            m_assembler.xchgl_rr(dest, X86Registers::eax);
+            return;
+        }
+#endif
+        m_assembler.setCC_r(cond, dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+    void cmov(X86Assembler::Condition cond, RegisterID src, RegisterID dest)
+    {
+#if CPU(X86_64)
+        m_assembler.cmovq_rr(cond, src, dest);
+#else
+        m_assembler.cmovl_rr(cond, src, dest);
+#endif
+    }
+
+    static bool supportsLZCNT()
+    {
+        if (s_lzcntCheckState == CPUIDCheckState::NotChecked) {
+            int flags = 0;
+#if COMPILER(MSVC)
+            int cpuInfo[4];
+            __cpuid(cpuInfo, 0x80000001);
+            flags = cpuInfo[2];
+#elif COMPILER(GCC_OR_CLANG)
+#if CPU(X86_64)
+            asm (
+                "movl $0x80000001, %%eax;"
+                "cpuid;"
+                "movl %%ecx, %0;"
+                : "=g" (flags)
+                :
+                : "%eax", "%ebx", "%ecx", "%edx"
+                );
+#else
+            asm (
+                "movl $0x80000001, %%eax;"
+                "pushl %%ebx;"
+                "cpuid;"
+                "popl %%ebx;"
+                "movl %%ecx, %0;"
+                : "=g" (flags)
+                :
+                : "%eax", "%ecx", "%edx"
+                );
+#endif
+#endif // COMPILER(GCC_OR_CLANG)
+            s_lzcntCheckState = (flags & 0x20) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+        }
+        return s_lzcntCheckState == CPUIDCheckState::Set;
+    }
+
+    static bool supportsBMI1()
+    {
+        if (s_bmi1CheckState == CPUIDCheckState::NotChecked) {
+            int flags = 0;
+#if COMPILER(MSVC)
+            int cpuInfo[4];
+            __cpuidex(cpuInfo, 0x7, 0x0);
+            flags = cpuInfo[1];
+#elif COMPILER(GCC_OR_CLANG)
+            asm (
+                 "movl $0x7, %%eax;"
+                 "movl $0x0, %%ecx;"
+                 "cpuid;"
+                 "movl %%ebx, %0;"
+                 : "=g" (flags)
+                 :
+                 : "%eax", "%ebx", "%ecx", "%edx"
+                 );
+#endif // COMPILER(GCC_OR_CLANG)
+            static int BMI1FeatureBit = 1 << 3;
+            s_bmi1CheckState = (flags & BMI1FeatureBit) ? CPUIDCheckState::Set : CPUIDCheckState::Clear;
+        }
+        return s_bmi1CheckState == CPUIDCheckState::Set;
+    }
+
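+    // BSF leaves the destination undefined and sets ZF when the source is zero, so after the BSF
+    // emitted by the caller this conditionally moves the register width (e.g. 64 for
+    // countTrailingZeros64) into dst as the trailing-zero count for a zero input.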
+    template<size_t sizeOfRegister>
+    void ctzAfterBsf(RegisterID dst)
+    {
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(sizeOfRegister), dst);
+        srcIsNonZero.link(this);
+    }
+
+private:
+    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+    friend class MacroAssemblerX86;
+
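+    // When the mask fits entirely within one byte of the operand, the test can be narrowed to a
+    // single testb against that byte; e.g. a mask of 0x0000ff00 becomes a testb of (mask >> 8)
+    // against the byte at offset + 1. A mask of -1 degenerates to a compare against zero.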
+    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base);
+        else if (!(mask.m_value & ~0xff))
+            m_assembler.testb_im(mask.m_value, address.offset, address.base);
+        else if (!(mask.m_value & ~0xff00))
+            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
+        else if (!(mask.m_value & ~0xff0000))
+            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
+        else if (!(mask.m_value & ~0xff000000))
+            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+    }
+
+    // If lzcnt is not available, use this after BSR
+    // to count the leading zeros.
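+    // BSR writes the index of the highest set bit (0..31), so XOR-ing with 0x1f converts that
+    // index into the leading-zero count; e.g. bsr(0x00008000) = 15 and 15 ^ 0x1f = 16. When the
+    // source is zero, BSR sets ZF and leaves dst undefined, so 32 is moved in explicitly.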
+    void clz32AfterBsr(RegisterID dst)
+    {
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(32), dst);
+
+        Jump skipNonZeroCase = jump();
+        srcIsNonZero.link(this);
+        xor32(TrustedImm32(0x1f), dst);
+        skipNonZeroCase.link(this);
+    }
+
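+    // The floating point compares (ucomisd/ucomiss) set PF when the result is unordered (either
+    // operand is NaN). DoubleEqual therefore needs "ZF set and PF clear": when left == right the
+    // operands can only compare equal or unordered, so a single jnp suffices; otherwise a jp guard
+    // filters out the NaN case before the je. DoubleNotEqualOrUnordered is handled analogously.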
+    Jump jumpAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        if (cond == DoubleEqual) {
+            if (left == right)
+                return Jump(m_assembler.jnp());
+            Jump isUnordered(m_assembler.jp());
+            Jump result = Jump(m_assembler.je());
+            isUnordered.link(this);
+            return result;
+        }
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right)
+                return Jump(m_assembler.jp());
+            Jump isUnordered(m_assembler.jp());
+            Jump isEqual(m_assembler.je());
+            isUnordered.link(this);
+            Jump result = jump();
+            isEqual.link(this);
+            return result;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+    }
+
+    // The 32bit Move does not need the REX byte for low registers, making it shorter.
+    // Use this if the top bits are irrelevant because they will be reset by the next instruction.
+    void move32IfNeeded(RegisterID src, RegisterID dest)
+    {
+        if (src == dest)
+            return;
+        m_assembler.movl_rr(src, dest);
+    }
+
+#if CPU(X86_64)
+    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.cmovnpq_rr(src, dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.cmoveq_rr(src, dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.cmovpq_rr(src, dest);
+                return;
+            }
+
+            m_assembler.cmovpq_rr(src, dest);
+            m_assembler.cmovneq_rr(src, dest);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        cmov(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
+    }
+#endif
+
+#if CPU(X86)
+#if OS(MAC_OS_X)
+
+    // All X86 Macs are guaranteed to support at least SSE2.
+    static bool isSSE2Present()
+    {
+        return true;
+    }
+
+#else // OS(MAC_OS_X)
+
+    enum SSE2CheckState {
+        NotCheckedSSE2,
+        HasSSE2,
+        NoSSE2
+    };
+
+    static bool isSSE2Present()
+    {
+        if (s_sse2CheckState == NotCheckedSSE2) {
+            // Default the flags value to zero; if the compiler is
+            // not MSVC or GCC we will read this as SSE2 not present.
+            int flags = 0;
+#if COMPILER(MSVC)
+            _asm {
+                mov eax, 1 // cpuid function 1 gives us the standard feature set
+                cpuid;
+                mov flags, edx;
+            }
+#elif COMPILER(GCC_OR_CLANG)
+            asm (
+                 "movl $0x1, %%eax;"
+                 "pushl %%ebx;"
+                 "cpuid;"
+                 "popl %%ebx;"
+                 "movl %%edx, %0;"
+                 : "=g" (flags)
+                 :
+                 : "%eax", "%ecx", "%edx"
+                 );
+#endif
+            static const int SSE2FeatureBit = 1 << 26;
+            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+        }
+        // Only check once.
+        ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+        return s_sse2CheckState == HasSSE2;
+    }
+    
+    JS_EXPORTDATA static SSE2CheckState s_sse2CheckState;
+
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
+
+    // On x86-64 we should never be checking for SSE2 in a non-debug build,
+    // but debug builds add this method to keep the asserts above happy.
+    static bool isSSE2Present()
+    {
+        return true;
+    }
+
+#endif
+
+    enum class CPUIDCheckState {
+        NotChecked,
+        Clear,
+        Set
+    };
+    JS_EXPORT_PRIVATE static CPUIDCheckState s_sse4_1CheckState;
+    JS_EXPORT_PRIVATE static CPUIDCheckState s_avxCheckState;
+    static CPUIDCheckState s_bmi1CheckState;
+    static CPUIDCheckState s_lzcntCheckState;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MacroAssemblerX86_64.h b/assembler/MacroAssemblerX86_64.h
new file mode 100644
index 0000000..7e18412
--- /dev/null
+++ b/assembler/MacroAssemblerX86_64.h
@@ -0,0 +1,1525 @@
+/*
+ * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
+
+#include "MacroAssemblerX86Common.h"
+
+#define REPATCH_OFFSET_CALL_R11 3
+
+inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
+
+namespace JSC {
+
+class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
+public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+    
+    static const Scale ScalePtr = TimesEight;
+
+    using MacroAssemblerX86Common::add32;
+    using MacroAssemblerX86Common::and32;
+    using MacroAssemblerX86Common::branch32;
+    using MacroAssemblerX86Common::branchAdd32;
+    using MacroAssemblerX86Common::or32;
+    using MacroAssemblerX86Common::sub32;
+    using MacroAssemblerX86Common::load8;
+    using MacroAssemblerX86Common::load32;
+    using MacroAssemblerX86Common::store32;
+    using MacroAssemblerX86Common::store8;
+    using MacroAssemblerX86Common::call;
+    using MacroAssemblerX86Common::jump;
+    using MacroAssemblerX86Common::addDouble;
+    using MacroAssemblerX86Common::loadDouble;
+    using MacroAssemblerX86Common::convertInt32ToDouble;
+
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        add32(imm, Address(scratchRegister()));
+    }
+    
+    void and32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        and32(imm, Address(scratchRegister()));
+    }
+    
+    void add32(AbsoluteAddress address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        add32(Address(scratchRegister()), dest);
+    }
+    
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        or32(imm, Address(scratchRegister()));
+    }
+
+    void or32(RegisterID reg, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        or32(reg, Address(scratchRegister()));
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        sub32(imm, Address(scratchRegister()));
+    }
+    
+    void load8(const void* address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address), dest);
+        load8(dest, dest);
+    }
+
+    void load32(const void* address, RegisterID dest)
+    {
+        if (dest == X86Registers::eax)
+            m_assembler.movl_mEAX(address);
+        else {
+            move(TrustedImmPtr(address), dest);
+            load32(dest, dest);
+        }
+    }
+
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        m_assembler.addsd_mr(0, scratchRegister(), dest);
+    }
+
+    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+    {
+        move(imm, scratchRegister());
+        m_assembler.cvtsi2sd_rr(scratchRegister(), dest);
+    }
+
+    void store32(TrustedImm32 imm, void* address)
+    {
+        move(TrustedImmPtr(address), scratchRegister());
+        store32(imm, scratchRegister());
+    }
+
+    void store32(RegisterID source, void* address)
+    {
+        if (source == X86Registers::eax)
+            m_assembler.movl_EAXm(address);
+        else {
+            move(TrustedImmPtr(address), scratchRegister());
+            store32(source, scratchRegister());
+        }
+    }
+    
+    void store8(TrustedImm32 imm, void* address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(TrustedImmPtr(address), scratchRegister());
+        store8(imm8, Address(scratchRegister()));
+    }
+
+    void store8(RegisterID reg, void* address)
+    {
+        move(TrustedImmPtr(address), scratchRegister());
+        store8(reg, Address(scratchRegister()));
+    }
+
+#if OS(WINDOWS)
+    Call callWithSlowPathReturnType()
+    {
+        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right,
+        // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
+        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
+        // We then need to copy the 16 byte return value into rax and rdx, since JIT expects the return value to be split between the two.
+        // It is assumed that the parameters are already shifted to the right, when entering this method.
+        // Note: this implementation supports up to 3 parameters.
+
+        // JIT relies on the CallerFrame (frame pointer) being put on the stack,
+        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
+        store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
+        // In addition, we need to allocate 16 bytes for the return value.
+        // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated).
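+        // After the 64-byte adjustment below, the layout is: [rsp+0, rsp+32) shadow space for the
+        // four parameter registers, [rsp+32, rsp+48) the 16-byte return value (rcx points here),
+        // [rsp+48] the frame pointer stored above, and [rsp+56, rsp+64) the unpopulated slot.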
+        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+        // The first parameter register should contain a pointer to the stack allocated space for the return value.
+        move(X86Registers::esp, X86Registers::ecx);
+        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);
+
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);
+
+        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+        // Copy the return value into rax and rdx.
+        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
+        load64(Address(X86Registers::eax), X86Registers::eax);
+
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
+        return result;
+    }
+#endif
+
+    Call call()
+    {
+#if OS(WINDOWS)
+        // JIT relies on the CallerFrame (frame pointer) being put on the stack,
+        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
+        store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
+        // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied.
+
+        // Copy argument 5
+        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister());
+        store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));
+
+        // Copy argument 6
+        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister());
+        store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
+        // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated).
+        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
+        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable);
+#if OS(WINDOWS)
+        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
+        return result;
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        jump(Address(scratchRegister()));
+    }
+
+    Call tailRecursiveCall()
+    {
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
+        return Call::fromTailJump(newJump);
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister());
+        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister()));
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
+        return Call::fromTailJump(newJump);
+    }
+
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), scratchRegister());
+        add32(src, Address(scratchRegister()));
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    void add64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.addq_rr(src, dest);
+    }
+    
+    void add64(Address src, RegisterID dest)
+    {
+        m_assembler.addq_mr(src.offset, src.base, dest);
+    }
+
+    void add64(RegisterID src, Address dest)
+    {
+        m_assembler.addq_rm(src, dest.offset, dest.base);
+    }
+
+    void add64(AbsoluteAddress src, RegisterID dest)
+    {
+        move(TrustedImmPtr(src.m_ptr), scratchRegister());
+        add64(Address(scratchRegister()), dest);
+    }
+
+    void add64(TrustedImm32 imm, RegisterID srcDest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.incq_r(srcDest);
+        else
+            m_assembler.addq_ir(imm.m_value, srcDest);
+    }
+
+    void add64(TrustedImm64 imm, RegisterID dest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.incq_r(dest);
+        else {
+            move(imm, scratchRegister());
+            add64(scratchRegister(), dest);
+        }
+    }
+
+    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.leaq_mr(imm.m_value, src, dest);
+    }
+
+    void add64(TrustedImm32 imm, Address address)
+    {
+        if (imm.m_value == 1)
+            m_assembler.incq_m(address.offset, address.base);
+        else
+            m_assembler.addq_im(imm.m_value, address.offset, address.base);
+    }
+
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        add64(imm, Address(scratchRegister()));
+    }
+
+    void add64(RegisterID a, RegisterID b, RegisterID dest)
+    {
+        x86Lea64(BaseIndex(a, b, TimesOne), dest);
+    }
+
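+    // leaq computes base + index*scale + offset in one instruction and, unlike add, does not
+    // modify the flags; when scale and offset are zero and the destination already aliases one of
+    // the operands, a plain addq is shorter, so that case falls back to add64.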
+    void x86Lea64(BaseIndex index, RegisterID dest)
+    {
+        if (!index.scale && !index.offset) {
+            if (index.base == dest) {
+                add64(index.index, dest);
+                return;
+            }
+            if (index.index == dest) {
+                add64(index.base, dest);
+                return;
+            }
+        }
+        m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest);
+    }
+
+    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+    {
+        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
+    }
+
+    void and64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.andq_rr(src, dest);
+    }
+
+    void and64(TrustedImm32 imm, RegisterID srcDest)
+    {
+        m_assembler.andq_ir(imm.m_value, srcDest);
+    }
+
+    void and64(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        intptr_t intValue = imm.asIntptr();
+        if (intValue <= std::numeric_limits<int32_t>::max()
+            && intValue >= std::numeric_limits<int32_t>::min()) {
+            and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest);
+            return;
+        }
+        move(imm, scratchRegister());
+        and64(scratchRegister(), srcDest);
+    }
+
+    void and64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2 && op1 != dest && op2 != dest)
+            move(op1, dest);
+        else if (op1 == dest)
+            and64(op2, dest);
+        else {
+            move(op2, dest);
+            and64(op1, dest);
+        }
+    }
+
+    void countLeadingZeros64(RegisterID src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcntq_rr(src, dst);
+            return;
+        }
+        m_assembler.bsrq_rr(src, dst);
+        clz64AfterBsr(dst);
+    }
+
+    void countLeadingZeros64(Address src, RegisterID dst)
+    {
+        if (supportsLZCNT()) {
+            m_assembler.lzcntq_mr(src.offset, src.base, dst);
+            return;
+        }
+        m_assembler.bsrq_mr(src.offset, src.base, dst);
+        clz64AfterBsr(dst);
+    }
+
+    void countTrailingZeros64(RegisterID src, RegisterID dst)
+    {
+        if (supportsBMI1()) {
+            m_assembler.tzcntq_rr(src, dst);
+            return;
+        }
+        m_assembler.bsfq_rr(src, dst);
+        ctzAfterBsf<64>(dst);
+    }
+
+    void lshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shlq_i8r(imm.m_value, dest);
+    }
+    
+    void lshift64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.shlq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only shift by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+    
+    void rshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.sarq_i8r(imm.m_value, dest);
+    }
+
+    void rshift64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.sarq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+            
+            // Can only shift by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void urshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shrq_i8r(imm.m_value, dest);
+    }
+
+    void urshift64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.shrq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+            
+            // Can only shift by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void rotateRight64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.rorq_i8r(imm.m_value, dest);
+    }
+
+    void rotateRight64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.rorq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.rorq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void rotateLeft64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.rolq_i8r(imm.m_value, dest);
+    }
+
+    void rotateLeft64(RegisterID src, RegisterID dest)
+    {
+        if (src == X86Registers::ecx)
+            m_assembler.rolq_CLr(dest);
+        else {
+            ASSERT(src != dest);
+
+            // Can only rotate by ecx, so we do some swapping if we see anything else.
+            swap(src, X86Registers::ecx);
+            m_assembler.rolq_CLr(dest == X86Registers::ecx ? src : dest);
+            swap(src, X86Registers::ecx);
+        }
+    }
+
+    void mul64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.imulq_rr(src, dest);
+    }
+
+    void mul64(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src2 == dest) {
+            m_assembler.imulq_rr(src1, dest);
+            return;
+        }
+        move(src1, dest);
+        m_assembler.imulq_rr(src2, dest);
+    }
+    
+    void x86ConvertToQuadWord64()
+    {
+        m_assembler.cqo();
+    }
+
+    void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx)
+    {
+        ASSERT_UNUSED(rax, rax == X86Registers::eax);
+        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
+        x86ConvertToQuadWord64();
+    }
+
+    void x86Div64(RegisterID denominator)
+    {
+        m_assembler.idivq_r(denominator);
+    }
+
+    void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(rax, rax == X86Registers::eax);
+        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
+        x86Div64(denominator);
+    }
+
+    void x86UDiv64(RegisterID denominator)
+    {
+        m_assembler.divq_r(denominator);
+    }
+
+    void x86UDiv64(RegisterID rax, RegisterID rdx, RegisterID denominator)
+    {
+        ASSERT_UNUSED(rax, rax == X86Registers::eax);
+        ASSERT_UNUSED(rdx, rdx == X86Registers::edx);
+        x86UDiv64(denominator);
+    }
+
+    void neg64(RegisterID dest)
+    {
+        m_assembler.negq_r(dest);
+    }
+
+    void or64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orq_rr(src, dest);
+    }
+
+    void or64(TrustedImm64 imm, RegisterID srcDest)
+    {
+        if (imm.m_value <= std::numeric_limits<int32_t>::max()
+            && imm.m_value >= std::numeric_limits<int32_t>::min()) {
+            or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest);
+            return;
+        }
+        move(imm, scratchRegister());
+        or64(scratchRegister(), srcDest);
+    }
+
+    void or64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.orq_ir(imm.m_value, dest);
+    }
+
+    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(op1, dest);
+        else if (op1 == dest)
+            or64(op2, dest);
+        else {
+            move(op2, dest);
+            or64(op1, dest);
+        }
+    }
+
+    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        or64(imm, dest);
+    }
+
+    void sub64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.subq_rr(src, dest);
+    }
+    
+    void sub64(TrustedImm32 imm, RegisterID dest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.decq_r(dest);
+        else
+            m_assembler.subq_ir(imm.m_value, dest);
+    }
+    
+    void sub64(TrustedImm64 imm, RegisterID dest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.decq_r(dest);
+        else {
+            move(imm, scratchRegister());
+            sub64(scratchRegister(), dest);
+        }
+    }
+
+    void sub64(TrustedImm32 imm, Address address)
+    {
+        m_assembler.subq_im(imm.m_value, address.offset, address.base);
+    }
+
+    void sub64(Address src, RegisterID dest)
+    {
+        m_assembler.subq_mr(src.offset, src.base, dest);
+    }
+
+    void sub64(RegisterID src, Address dest)
+    {
+        m_assembler.subq_rm(src, dest.offset, dest.base);
+    }
+
+    void xor64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xorq_rr(src, dest);
+    }
+
+    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(TrustedImm32(0), dest);
+        else if (op1 == dest)
+            xor64(op2, dest);
+        else {
+            move(op2, dest);
+            xor64(op1, dest);
+        }
+    }
+    
+    void xor64(RegisterID src, Address dest)
+    {
+        m_assembler.xorq_rm(src, dest.offset, dest.base);
+    }
+
+    void xor64(TrustedImm32 imm, RegisterID srcDest)
+    {
+        m_assembler.xorq_ir(imm.m_value, srcDest);
+    }
+
+    void not64(RegisterID srcDest)
+    {
+        m_assembler.notq_r(srcDest);
+    }
+
+    void not64(Address dest)
+    {
+        m_assembler.notq_m(dest.offset, dest.base);
+    }
+
+    void load64(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movq_mr(address.offset, address.base, dest);
+    }
+
+    void load64(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load64(const void* address, RegisterID dest)
+    {
+        if (dest == X86Registers::eax)
+            m_assembler.movq_mEAX(address);
+        else {
+            move(TrustedImmPtr(address), dest);
+            load64(dest, dest);
+        }
+    }
+
+    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
+        return DataLabel32(this);
+    }
+    
+    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
+        return DataLabelCompact(this);
+    }
+
+    void store64(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.movq_rm(src, address.offset, address.base);
+    }
+
+    void store64(RegisterID src, BaseIndex address)
+    {
+        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+    
+    void store64(RegisterID src, void* address)
+    {
+        if (src == X86Registers::eax)
+            m_assembler.movq_EAXm(address);
+        else {
+            move(TrustedImmPtr(address), scratchRegister());
+            store64(src, scratchRegister());
+        }
+    }
+
+    void store64(TrustedImm32 imm, ImplicitAddress address)
+    {
+        m_assembler.movq_i32m(imm.m_value, address.offset, address.base);
+    }
+
+    void store64(TrustedImm64 imm, ImplicitAddress address)
+    {
+        if (CAN_SIGN_EXTEND_32_64(imm.m_value)) {
+            store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address);
+            return;
+        }
+
+        move(imm, scratchRegister());
+        store64(scratchRegister(), address);
+    }
+
+    void store64(TrustedImm64 imm, BaseIndex address)
+    {
+        move(imm, scratchRegister());
+        m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale);
+    }
+    
+    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        padBeforePatch();
+        m_assembler.movq_rm_disp32(src, address.offset, address.base);
+        return DataLabel32(this);
+    }
+
+    void swap64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xchgq_rr(src, dest);
+    }
+
+    void swap64(RegisterID src, Address dest)
+    {
+        m_assembler.xchgq_rm(src, dest.offset, dest.base);
+    }
+
+    void move64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.movq_rr(src, dest);
+    }
+
+    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.movq_rr(src, dest);
+    }
+
+    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                test64(*resultCondition, left, left, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpq_ir(right.m_value, left);
+        set32(x86Condition(cond), dest);
+    }
+    
+    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+        set32(x86Condition(cond), dest);
+    }
+
+    void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+    {
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.setnp_r(dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.sete_r(dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.setp_r(dest);
+                return;
+            }
+
+            m_assembler.setp_r(dest);
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.setne_r(dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        m_assembler.setCC_r(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest);
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmpq_rr(right, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+                return branchTest64(*resultCondition, left, left);
+        }
+        m_assembler.cmpq_ir(right.m_value, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+    {
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
+            m_assembler.testq_rr(left, left);
+            return Jump(m_assembler.jCC(x86Condition(cond)));
+        }
+        move(right, scratchRegister());
+        return branch64(cond, left, scratchRegister());
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+    {
+        m_assembler.cmpq_mr(right.offset, right.base, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        move(TrustedImmPtr(left.m_ptr), scratchRegister());
+        return branch64(cond, Address(scratchRegister()), right);
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+    {
+        m_assembler.cmpq_rm(right, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        m_assembler.cmpq_im(right.m_value, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+    {
+        move(right, scratchRegister());
+        return branch64(cond, left, scratchRegister());
+    }
+
+    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
+    {
+        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, scratchRegister());
+        return branch32(cond, scratchRegister(), right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
+    {
+        move(right, scratchRegister());
+        return branchPtr(cond, left, scratchRegister());
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        m_assembler.testq_rr(reg, mask);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        // if we are only interested in the low seven bits, this can be tested with a testb
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(reg, reg);
+        else if ((mask.m_value & ~0x7f) == 0)
+            m_assembler.testb_i8r(mask.m_value, reg);
+        else
+            m_assembler.testq_i32r(mask.m_value, reg);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
+    {
+        move(mask, scratchRegister());
+        return branchTest64(cond, reg, scratchRegister());
+    }
+
+    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(reg, reg);
+        else if ((mask.m_value & ~0x7f) == 0)
+            m_assembler.testb_i8r(mask.m_value, reg);
+        else
+            m_assembler.testq_i32r(mask.m_value, reg);
+        set32(x86Condition(cond), dest);
+    }
+
+    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.testq_rr(reg, mask);
+        set32(x86Condition(cond), dest);
+    }
+
+    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address.m_ptr, scratchRegister());
+        return branchTest64(cond, scratchRegister(), mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpq_im(0, address.offset, address.base);
+        else
+            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
+    {
+        m_assembler.testq_rm(reg, address.offset, address.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
+        else
+            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+
+    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        add64(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchAdd64(cond, src2, dest);
+        move(src2, dest);
+        return branchAdd64(cond, src1, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest)
+            return branchAdd64(cond, op1, dest);
+        if (op1.base == dest) {
+            load64(op1, dest);
+            return branchAdd64(cond, op2, dest);
+        }
+        move(op2, dest);
+        return branchAdd64(cond, op1, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest)
+    {
+        return branchAdd64(cond, src2, src1, dest);
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        add64(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest)
+    {
+        add64(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        mul64(src, dest);
+        if (cond != Overflow)
+            m_assembler.testq_rr(dest, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest)
+            return branchMul64(cond, src2, dest);
+        move(src2, dest);
+        return branchMul64(cond, src1, dest);
+    }
+
+    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        sub64(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        sub64(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+    {
+        move(src1, dest);
+        return branchSub64(cond, src2, dest);
+    }
+
+    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
+    {
+        neg64(srcDest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+        cmov(x86Condition(cond), src, dest);
+    }
+
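+    // then/else selection with cmov: if neither input aliases dest, elseCase is copied into dest
+    // first so a single cmov on the condition (or on its inverse, when thenCase aliases dest)
+    // picks the final value. The same pattern is used by the other moveConditionally* variants.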
+    void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        if (!right.m_value) {
+            if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+                moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+                return;
+            }
+        }
+
+        m_assembler.cmpq_ir(right.m_value, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.testq_rr(testReg, mask);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        m_assembler.testq_rr(right, left);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+    
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest)
+    {
+        // if we are only interested in the low seven bits, this can be tested with a testb
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(testReg, testReg);
+        else if ((mask.m_value & ~0x7f) == 0)
+            m_assembler.testb_i8r(mask.m_value, testReg);
+        else
+            m_assembler.testq_i32r(mask.m_value, testReg);
+        cmov(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+    {
+        ASSERT(isInvertible(cond));
+        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
+
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(testReg, testReg);
+        else if (!(mask.m_value & ~0x7f))
+            m_assembler.testb_i8r(mask.m_value, testReg);
+        else
+            m_assembler.testq_i32r(mask.m_value, testReg);
+
+        if (thenCase != dest && elseCase != dest) {
+            move(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest)
+            cmov(x86Condition(cond), thenCase, dest);
+        else
+            cmov(x86Condition(invert(cond)), elseCase, dest);
+    }
+
+    template<typename LeftType, typename RightType>
+    void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (thenCase != dest && elseCase != dest) {
+            moveDouble(elseCase, dest);
+            elseCase = dest;
+        }
+
+        if (elseCase == dest) {
+            Jump falseCase = branch64(invert(cond), left, right);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else {
+            Jump trueCase = branch64(cond, left, right);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        }
+    }
+
+    template<typename TestType, typename MaskType>
+    void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+    {
+        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
+
+        if (elseCase == dest && isInvertible(cond)) {
+            Jump falseCase = branchTest64(invert(cond), test, mask);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        } else if (thenCase == dest) {
+            Jump trueCase = branchTest64(cond, test, mask);
+            moveDouble(elseCase, dest);
+            trueCase.link(this);
+        } else {
+            Jump trueCase = branchTest64(cond, test, mask);
+            moveDouble(elseCase, dest);
+            Jump falseCase = jump();
+            trueCase.link(this);
+            moveDouble(thenCase, dest);
+            falseCase.link(this);
+        }
+    }
+    
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), X86Registers::r11);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm64(misc), X86Registers::r10);
+        abortWithReason(reason);
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+        m_assembler.movq_mr(address.offset, address.base, dest);
+        return result;
+    }
+
+    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
+        return DataLabelPtr(this);
+    }
+
+    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_i64r(initialValue.m_value, dest);
+        return DataLabelPtr(this);
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
+        return branch64(cond, left, scratchRegister());
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+    {
+        dataLabel = moveWithPatch(initialRightValue, scratchRegister());
+        return branch64(cond, left, scratchRegister());
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        padBeforePatch();
+        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister());
+        dataLabel = DataLabel32(this);
+        return branch32(cond, left, scratchRegister());
+    }
+
+    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister());
+        store64(scratchRegister(), address);
+        return label;
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+    {
+        return PatchableJump(branch64(cond, reg, imm));
+    }
+
+    PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        return PatchableJump(branch64(cond, left, right));
+    }
+    
+    using MacroAssemblerX86Common::branch8;
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister());
+        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right);
+    }
+    
+    using MacroAssemblerX86Common::branchTest8;
+    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
+        MacroAssemblerX86Common::move(addr, scratchRegister());
+        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask8);
+    }
+    
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImm32 mask8(static_cast<int8_t>(mask.m_value));
+        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister());
+        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask8);
+    }
+
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttsd2siq_rr(src, dest);
+    }
+
+    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttsd2siq_rr(src, dest);
+    }
+
+    // int64Min should contain exactly 0x43E0000000000000, i.e. 2^63 as a double (== -static_cast<double>(int64_t::min())). scratch may
+    // be the same FPR as src.
+    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
+    {
+        ASSERT(scratch != int64Min);
+
+        // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed
+        // integer conversion instruction. If the src is less than 2^63, the signed truncation already gives
+        // the right answer. Otherwise, we need to: subtract 2^63; truncate the double to uint64_t; then add
+        // 2^63 back in the destination gpr (done below by OR-ing in the top bit).
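+        // For example, with src == 2^63 + 2048 (exactly representable as a double): subtracting 2^63
+        // leaves 2048.0, cvttsd2siq produces 2048, and OR-ing in 0x8000000000000000 yields 2^63 + 2048.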
+
+        Jump large = branchDouble(DoubleGreaterThanOrEqual, src, int64Min);
+        m_assembler.cvttsd2siq_rr(src, dest);
+        Jump done = jump();
+        large.link(this);
+        moveDouble(src, scratch);
+        m_assembler.subsd_rr(int64Min, scratch);
+        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
+        m_assembler.cvttsd2siq_rr(scratch, dest);
+        m_assembler.orq_rr(scratchRegister(), dest);
+        done.link(this);
+    }
+
+    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttss2siq_rr(src, dest);
+    }
+
+    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
+    {
+        m_assembler.cvttss2siq_rr(src, dest);
+    }
+
+    // int64Min should contain exactly 0x5f000000, i.e. 2^63 as a float (== -static_cast<float>(int64_t::min())). scratch may be the
+    // same FPR as src.
+    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
+    {
+        ASSERT(scratch != int64Min);
+
+        // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed
+        // integer conversion instruction. If the src is less than 2^63, the signed truncation already gives
+        // the right answer. Otherwise, we need to: subtract 2^63; truncate the float to uint64_t; then add
+        // 2^63 back in the destination gpr (done below by OR-ing in the top bit).
+
+        Jump large = branchFloat(DoubleGreaterThanOrEqual, src, int64Min);
+        m_assembler.cvttss2siq_rr(src, dest);
+        Jump done = jump();
+        large.link(this);
+        moveDouble(src, scratch);
+        m_assembler.subss_rr(int64Min, scratch);
+        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
+        m_assembler.cvttss2siq_rr(scratch, dest);
+        m_assembler.orq_rr(scratchRegister(), dest);
+        done.link(this);
+    }
+
+    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2sdq_rr(src, dest);
+    }
+
+    void convertInt64ToDouble(Address src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2sdq_mr(src.offset, src.base, dest);
+    }
+
+    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2ssq_rr(src, dest);
+    }
+
+    void convertInt64ToFloat(Address src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2ssq_mr(src.offset, src.base, dest);
+    }
+
+    // One of scratch or scratch2 may be the same as src
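+    // If the sign bit of src is clear, a plain signed conversion is used. Otherwise the value is halved with
+    // round-to-odd ((src >> 1) | (src & 1)), converted, and doubled again (addsd dest, dest); keeping the dropped
+    // low bit "sticky" preserves correct rounding of the final result.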
+    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch)
+    {
+        RegisterID scratch2 = scratchRegister();
+
+        m_assembler.testq_rr(src, src);
+        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
+        m_assembler.cvtsi2sdq_rr(src, dest);
+        AssemblerLabel done = m_assembler.jmp();
+        m_assembler.linkJump(signBitSet, m_assembler.label());
+        if (scratch != src)
+            m_assembler.movq_rr(src, scratch);
+        m_assembler.movq_rr(src, scratch2);
+        m_assembler.shrq_i8r(1, scratch);
+        m_assembler.andq_ir(1, scratch2);
+        m_assembler.orq_rr(scratch, scratch2);
+        m_assembler.cvtsi2sdq_rr(scratch2, dest);
+        m_assembler.addsd_rr(dest, dest);
+        m_assembler.linkJump(done, m_assembler.label());
+    }
+
+    // One of scratch or scratch2 may be the same as src
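+    // Same halve-with-round-to-odd approach as convertUInt64ToDouble above, using the single-precision conversion.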
+    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest, RegisterID scratch)
+    {
+        RegisterID scratch2 = scratchRegister();
+        m_assembler.testq_rr(src, src);
+        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
+        m_assembler.cvtsi2ssq_rr(src, dest);
+        AssemblerLabel done = m_assembler.jmp();
+        m_assembler.linkJump(signBitSet, m_assembler.label());
+        if (scratch != src)
+            m_assembler.movq_rr(src, scratch);
+        m_assembler.movq_rr(src, scratch2);
+        m_assembler.shrq_i8r(1, scratch);
+        m_assembler.andq_ir(1, scratch2);
+        m_assembler.orq_rr(scratch, scratch2);
+        m_assembler.cvtsi2ssq_rr(scratch2, dest);
+        m_assembler.addss_rr(dest, dest);
+        m_assembler.linkJump(done, m_assembler.label());
+    }
+
+    static bool supportsFloatingPoint() { return true; }
+    static bool supportsFloatingPointTruncate() { return true; }
+    static bool supportsFloatingPointSqrt() { return true; }
+    static bool supportsFloatingPointAbs() { return true; }
+    
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
+    }
+
+    bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; }
+    RegisterID scratchRegisterForBlinding() { return scratchRegister(); }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
+    
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        const int immediateBytes = 8;
+        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
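+        // i.e. the 10-byte "movq $imm64, %r11" (49 BB <imm64>) used to materialize the patched pointer.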
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
+    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
+    {
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        const int immediateBytes = 4;
+        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
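+        // i.e. the 6-byte "movl $imm32, %r11d" (41 BB <imm32>) used to materialize the patched 32-bit value.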
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+    
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+    {
+        return startOfBranchPtrWithPatchOnRegister(label);
+    }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+    {
+        return startOfBranch32WithPatchOnRegister(label);
+    }
+    
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+    {
+        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
+    {
+        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister);
+    }
+
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+    {
+        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+    }
+
+private:
+    // If lzcnt is not available, use this after BSR
+    // to count the leading zeros.
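+    // BSR leaves the index of the highest set bit, so leading zeros = 63 - index = index ^ 0x3f
+    // (e.g. a source of 1 gives index 0, and 0 ^ 0x3f = 63); a zero source sets ZF, in which case 64 is returned.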
+    void clz64AfterBsr(RegisterID dst)
+    {
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(64), dst);
+
+        Jump skipNonZeroCase = jump();
+        srcIsNonZero.link(this);
+        xor64(TrustedImm32(0x3f), dst);
+        skipNonZeroCase.link(this);
+    }
+
+    friend class LinkBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        if (!call.isFlagSet(Call::Near))
+            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
+        else if (call.isFlagSet(Call::Tail))
+            X86Assembler::linkJump(code, call.m_label, function.value());
+        else
+            X86Assembler::linkCall(code, call.m_label, function.value());
+    }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/assembler/MaxFrameExtentForSlowPathCall.h b/assembler/MaxFrameExtentForSlowPathCall.h
new file mode 100644
index 0000000..04ca0a7
--- /dev/null
+++ b/assembler/MaxFrameExtentForSlowPathCall.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Register.h"
+#include "StackAlignment.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes)
+// that can be used for outgoing args when calling a slow path C function
+// from JS code.
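+// For example, CPU(X86) below needs only 28 bytes for its 7 stack args but reserves 40, presumably so the
+// stack-alignment COMPILE_ASSERT at the bottom of this file holds.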
+
+#if !ENABLE(JIT)
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(X86_64) && OS(WINDOWS)
+// 4 args in registers, but stack space needs to be allocated for all args.
+static const size_t maxFrameExtentForSlowPathCall = 64;
+
+#elif CPU(X86_64)
+// All args in registers.
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(X86)
+// 7 args on stack (28 bytes).
+static const size_t maxFrameExtentForSlowPathCall = 40;
+
+#elif CPU(ARM64)
+// All args in registers.
+static const size_t maxFrameExtentForSlowPathCall = 0;
+
+#elif CPU(ARM)
+// First four args in registers, remaining 4 args on stack.
+static const size_t maxFrameExtentForSlowPathCall = 24;
+
+#elif CPU(SH4)
+// First four args in registers, remaining 4 args on stack.
+static const size_t maxFrameExtentForSlowPathCall = 24;
+
+#elif CPU(MIPS)
+// Though args are in registers, there needs to be space on the stack for all args.
+static const size_t maxFrameExtentForSlowPathCall = 40;
+
+#else
+#error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall"
+
+#endif
+
+COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers);
+
+#if ENABLE(JIT)
+// Make sure that cfr - maxFrameExtentForSlowPathCall bytes will make the stack pointer aligned
+COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer);
+#endif
+
+static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register);
+
+} // namespace JSC
diff --git a/assembler/SH4Assembler.h b/assembler/SH4Assembler.h
new file mode 100644
index 0000000..52217d8
--- /dev/null
+++ b/assembler/SH4Assembler.h
@@ -0,0 +1,2222 @@
+/*
+ * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "AssemblerBuffer.h"
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <limits.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <wtf/Assertions.h>
+#include <wtf/DataLog.h>
+#include <wtf/Vector.h>
+
+#ifndef NDEBUG
+#define SH4_ASSEMBLER_TRACING
+#endif
+
+namespace JSC {
+typedef uint16_t SH4Word;
+
+enum {
+    INVALID_OPCODE = 0xffff,
+    ADD_OPCODE = 0x300c,
+    ADDIMM_OPCODE = 0x7000,
+    ADDC_OPCODE = 0x300e,
+    ADDV_OPCODE = 0x300f,
+    AND_OPCODE = 0x2009,
+    ANDIMM_OPCODE = 0xc900,
+    DIV0_OPCODE = 0x2007,
+    DIV1_OPCODE = 0x3004,
+    BF_OPCODE = 0x8b00,
+    BFS_OPCODE = 0x8f00,
+    BRA_OPCODE = 0xa000,
+    BRAF_OPCODE = 0x0023,
+    NOP_OPCODE = 0x0009,
+    BSR_OPCODE = 0xb000,
+    RTS_OPCODE = 0x000b,
+    BT_OPCODE = 0x8900,
+    BTS_OPCODE = 0x8d00,
+    BSRF_OPCODE = 0x0003,
+    BRK_OPCODE = 0x003b,
+    FTRC_OPCODE = 0xf03d,
+    CMPEQ_OPCODE = 0x3000,
+    CMPEQIMM_OPCODE = 0x8800,
+    CMPGE_OPCODE = 0x3003,
+    CMPGT_OPCODE = 0x3007,
+    CMPHI_OPCODE = 0x3006,
+    CMPHS_OPCODE = 0x3002,
+    CMPPL_OPCODE = 0x4015,
+    CMPPZ_OPCODE = 0x4011,
+    CMPSTR_OPCODE = 0x200c,
+    DT_OPCODE = 0x4010,
+    FCMPEQ_OPCODE = 0xf004,
+    FCMPGT_OPCODE = 0xf005,
+    FMOV_OPCODE = 0xf00c,
+    FADD_OPCODE = 0xf000,
+    FMUL_OPCODE = 0xf002,
+    FSUB_OPCODE = 0xf001,
+    FDIV_OPCODE = 0xf003,
+    FNEG_OPCODE = 0xf04d,
+    JMP_OPCODE = 0x402b,
+    JSR_OPCODE = 0x400b,
+    LDSPR_OPCODE = 0x402a,
+    LDSLPR_OPCODE = 0x4026,
+    MOV_OPCODE = 0x6003,
+    MOVIMM_OPCODE = 0xe000,
+    MOVB_WRITE_RN_OPCODE = 0x2000,
+    MOVB_WRITE_RNDEC_OPCODE = 0x2004,
+    MOVB_WRITE_R0RN_OPCODE = 0x0004,
+    MOVB_WRITE_OFFGBR_OPCODE = 0xc000,
+    MOVB_WRITE_OFFRN_OPCODE = 0x8000,
+    MOVB_READ_RM_OPCODE = 0x6000,
+    MOVB_READ_RMINC_OPCODE = 0x6004,
+    MOVB_READ_R0RM_OPCODE = 0x000c,
+    MOVB_READ_OFFGBR_OPCODE = 0xc400,
+    MOVB_READ_OFFRM_OPCODE = 0x8400,
+    MOVL_WRITE_RN_OPCODE = 0x2002,
+    MOVL_WRITE_RNDEC_OPCODE = 0x2006,
+    MOVL_WRITE_R0RN_OPCODE = 0x0006,
+    MOVL_WRITE_OFFGBR_OPCODE = 0xc200,
+    MOVL_WRITE_OFFRN_OPCODE = 0x1000,
+    MOVL_READ_RM_OPCODE = 0x6002,
+    MOVL_READ_RMINC_OPCODE = 0x6006,
+    MOVL_READ_R0RM_OPCODE = 0x000e,
+    MOVL_READ_OFFGBR_OPCODE = 0xc600,
+    MOVL_READ_OFFPC_OPCODE = 0xd000,
+    MOVL_READ_OFFRM_OPCODE = 0x5000,
+    MOVW_WRITE_RN_OPCODE = 0x2001,
+    MOVW_WRITE_R0RN_OPCODE = 0x0005,
+    MOVW_READ_RM_OPCODE = 0x6001,
+    MOVW_READ_RMINC_OPCODE = 0x6005,
+    MOVW_READ_R0RM_OPCODE = 0x000d,
+    MOVW_READ_OFFRM_OPCODE = 0x8500,
+    MOVW_READ_OFFPC_OPCODE = 0x9000,
+    MOVA_READ_OFFPC_OPCODE = 0xc700,
+    MOVT_OPCODE = 0x0029,
+    MULL_OPCODE = 0x0007,
+    DMULL_L_OPCODE = 0x3005,
+    STSMACL_OPCODE = 0x001a,
+    STSMACH_OPCODE = 0x000a,
+    DMULSL_OPCODE = 0x300d,
+    NEG_OPCODE = 0x600b,
+    NEGC_OPCODE = 0x600a,
+    NOT_OPCODE = 0x6007,
+    OR_OPCODE = 0x200b,
+    ORIMM_OPCODE = 0xcb00,
+    ORBIMM_OPCODE = 0xcf00,
+    SETS_OPCODE = 0x0058,
+    SETT_OPCODE = 0x0018,
+    SHAD_OPCODE = 0x400c,
+    SHAL_OPCODE = 0x4020,
+    SHAR_OPCODE = 0x4021,
+    SHLD_OPCODE = 0x400d,
+    SHLL_OPCODE = 0x4000,
+    SHLL2_OPCODE = 0x4008,
+    SHLL8_OPCODE = 0x4018,
+    SHLL16_OPCODE = 0x4028,
+    SHLR_OPCODE = 0x4001,
+    SHLR2_OPCODE = 0x4009,
+    SHLR8_OPCODE = 0x4019,
+    SHLR16_OPCODE = 0x4029,
+    STSPR_OPCODE = 0x002a,
+    STSLPR_OPCODE = 0x4022,
+    FLOAT_OPCODE = 0xf02d,
+    SUB_OPCODE = 0x3008,
+    SUBC_OPCODE = 0x300a,
+    SUBV_OPCODE = 0x300b,
+    TST_OPCODE = 0x2008,
+    TSTIMM_OPCODE = 0xc800,
+    TSTB_OPCODE = 0xcc00,
+    EXTUB_OPCODE = 0x600c,
+    EXTUW_OPCODE = 0x600d,
+    XOR_OPCODE = 0x200a,
+    XORIMM_OPCODE = 0xca00,
+    XORB_OPCODE = 0xce00,
+    FMOVS_READ_RM_INC_OPCODE = 0xf009,
+    FMOVS_READ_RM_OPCODE = 0xf008,
+    FMOVS_READ_R0RM_OPCODE = 0xf006,
+    FMOVS_WRITE_RN_OPCODE = 0xf00a,
+    FMOVS_WRITE_RN_DEC_OPCODE = 0xf00b,
+    FMOVS_WRITE_R0RN_OPCODE = 0xf007,
+    FCNVDS_DRM_FPUL_OPCODE = 0xf0bd,
+    FCNVSD_FPUL_DRN_OPCODE = 0xf0ad,
+    LDS_RM_FPUL_OPCODE = 0x405a,
+    FLDS_FRM_FPUL_OPCODE = 0xf01d,
+    STS_FPUL_RN_OPCODE = 0x005a,
+    FSTS_FPUL_FRN_OPCODE = 0xF00d,
+    LDSFPSCR_OPCODE = 0x406a,
+    STSFPSCR_OPCODE = 0x006a,
+    LDSRMFPUL_OPCODE = 0x405a,
+    FSTSFPULFRN_OPCODE = 0xf00d,
+    FABS_OPCODE = 0xf05d,
+    FSQRT_OPCODE = 0xf06d,
+    FSCHG_OPCODE = 0xf3fd,
+    CLRT_OPCODE = 8,
+    SYNCO_OPCODE = 0x00ab,
+};
+
+namespace SH4Registers {
+typedef enum {
+    r0,
+    r1,
+    r2,
+    r3,
+    r4,
+    r5,
+    r6,
+    r7,
+    r8,
+    r9,
+    r10,
+    r11,
+    r12,
+    r13,
+    r14, fp = r14,
+    r15, sp = r15,
+    pc,
+    pr,
+} RegisterID;
+
+typedef enum {
+    fr0, dr0 = fr0,
+    fr1,
+    fr2, dr2 = fr2,
+    fr3,
+    fr4, dr4 = fr4,
+    fr5,
+    fr6, dr6 = fr6,
+    fr7,
+    fr8, dr8 = fr8,
+    fr9,
+    fr10, dr10 = fr10,
+    fr11,
+    fr12, dr12 = fr12,
+    fr13,
+    fr14, dr14 = fr14,
+    fr15,
+} FPRegisterID;
+}
+
+inline uint16_t getOpcodeGroup1(uint16_t opc, int rm, int rn)
+{
+    return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4));
+}
+
+inline uint16_t getOpcodeGroup2(uint16_t opc, int rm)
+{
+    return (opc | ((rm & 0xf) << 8));
+}
+
+inline uint16_t getOpcodeGroup3(uint16_t opc, int rm, int rn)
+{
+    return (opc | ((rm & 0xf) << 8) | (rn & 0xff));
+}
+
+inline uint16_t getOpcodeGroup4(uint16_t opc, int rm, int rn, int offset)
+{
+    return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4) | (offset & 0xf));
+}
+
+inline uint16_t getOpcodeGroup5(uint16_t opc, int rm)
+{
+    return (opc | (rm & 0xff));
+}
+
+inline uint16_t getOpcodeGroup6(uint16_t opc, int rm)
+{
+    return (opc | (rm & 0xfff));
+}
+
+inline uint16_t getOpcodeGroup7(uint16_t opc, int rm)
+{
+    return (opc | ((rm & 0x7) << 9));
+}
+
+inline uint16_t getOpcodeGroup8(uint16_t opc, int rm, int rn)
+{
+    return (opc | ((rm & 0x7) << 9) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup9(uint16_t opc, int rm, int rn)
+{
+    return (opc | ((rm & 0xf) << 8) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup10(uint16_t opc, int rm, int rn)
+{
+    return (opc | ((rm & 0x7) << 9) | ((rn & 0xf) << 4));
+}
+
+inline uint16_t getOpcodeGroup11(uint16_t opc, int rm, int rn)
+{
+    return (opc | ((rm & 0xf) << 4) | (rn & 0xf));
+}
+
+inline uint16_t getRn(uint16_t x)
+{
+    return ((x & 0xf00) >> 8);
+}
+
+inline uint16_t getRm(uint16_t x)
+{
+    return ((x & 0xf0) >> 4);
+}
+
+inline uint16_t getDisp(uint16_t x)
+{
+    return (x & 0xf);
+}
+
+inline uint16_t getImm8(uint16_t x)
+{
+    return (x & 0xff);
+}
+
+inline uint16_t getImm12(uint16_t x)
+{
+    return (x & 0xfff);
+}
+
+inline uint16_t getDRn(uint16_t x)
+{
+    return ((x & 0xe00) >> 9);
+}
+
+inline uint16_t getDRm(uint16_t x)
+{
+    return ((x & 0xe0) >> 5);
+}
+
+class SH4Assembler {
+public:
+    typedef SH4Registers::RegisterID RegisterID;
+    typedef SH4Registers::FPRegisterID FPRegisterID;
+    typedef AssemblerBufferWithConstantPool<512, 4, 2, SH4Assembler> SH4Buffer;
+    static const RegisterID scratchReg1 = SH4Registers::r3;
+    static const RegisterID scratchReg2 = SH4Registers::r11;
+    static const uint32_t maxInstructionSize = 16;
+
+    static constexpr RegisterID firstRegister() { return SH4Registers::r0; }
+    static constexpr RegisterID lastRegister() { return SH4Registers::r15; }
+
+    static constexpr FPRegisterID firstFPRegister() { return SH4Registers::dr0; }
+    static constexpr FPRegisterID lastFPRegister() { return SH4Registers::dr14; }
+
+    enum {
+        padForAlign8 = 0x00,
+        padForAlign16 = 0x0009,
+        padForAlign32 = 0x00090009,
+    };
+
+    enum JumpType {
+        JumpFar,
+        JumpNear
+    };
+
+    SH4Assembler()
+        : m_claimscratchReg(0x0)
+        , m_indexOfLastWatchpoint(INT_MIN)
+        , m_indexOfTailOfLastWatchpoint(INT_MIN)
+    {
+    }
+
+    SH4Buffer& buffer() { return m_buffer; }
+
+    // SH4 condition codes
+    typedef enum {
+        EQ = 0x0, // Equal
+        NE = 0x1, // Not Equal
+        HS = 0x2, // Unsigned Greater Than or Equal
+        HI = 0x3, // Unsigned Greater Than
+        LS = 0x4, // Unsigned Lower or Same
+        LI = 0x5, // Unsigned Lower
+        GE = 0x6, // Greater or Equal
+        LT = 0x7, // Less Than
+        GT = 0x8, // Greater Than
+        LE = 0x9, // Less or Equal
+        OF = 0xa, // OverFlow
+        SI = 0xb, // Signed
+        NS = 0xc, // Not Signed
+        EQU= 0xd, // Equal or unordered(NaN)
+        NEU= 0xe,
+        GTU= 0xf,
+        GEU= 0x10,
+        LTU= 0x11,
+        LEU= 0x12,
+    } Condition;
+
+    // Opaque label types
+public:
+    bool isImmediate(int constant)
+    {
+        return ((constant <= 127) && (constant >= -128));
+    }
+
+    RegisterID claimScratch()
+    {
+        ASSERT((m_claimscratchReg != 0x3));
+
+        if (!(m_claimscratchReg & 0x1)) {
+            m_claimscratchReg = (m_claimscratchReg | 0x1);
+            return scratchReg1;
+        }
+
+        m_claimscratchReg = (m_claimscratchReg | 0x2);
+        return scratchReg2;
+    }
+
+    void releaseScratch(RegisterID scratchR)
+    {
+        if (scratchR == scratchReg1)
+            m_claimscratchReg = (m_claimscratchReg & 0x2);
+        else
+            m_claimscratchReg = (m_claimscratchReg & 0x1);
+    }
+
+    // Stack operations
+
+    void pushReg(RegisterID reg)
+    {
+        if (reg == SH4Registers::pr) {
+            oneShortOp(getOpcodeGroup2(STSLPR_OPCODE, SH4Registers::sp));
+            return;
+        }
+
+        oneShortOp(getOpcodeGroup1(MOVL_WRITE_RNDEC_OPCODE, SH4Registers::sp, reg));
+    }
+
+    void popReg(RegisterID reg)
+    {
+        if (reg == SH4Registers::pr) {
+            oneShortOp(getOpcodeGroup2(LDSLPR_OPCODE, SH4Registers::sp));
+            return;
+        }
+
+        oneShortOp(getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, reg, SH4Registers::sp));
+    }
+
+    void movt(RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup2(MOVT_OPCODE, dst);
+        oneShortOp(opc);
+    }
+
+    // Arithmetic operations
+
+    void addlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(ADD_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void addclRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(ADDC_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void addvlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(ADDV_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void addlImm8r(int imm8, RegisterID dst)
+    {
+        ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+        uint16_t opc = getOpcodeGroup3(ADDIMM_OPCODE, dst, imm8);
+        oneShortOp(opc);
+    }
+
+    void andlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(AND_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void andlImm8r(int imm8, RegisterID dst)
+    {
+        ASSERT((imm8 <= 255) && (imm8 >= 0));
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+        uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8);
+        oneShortOp(opc);
+    }
+
+    void div1lRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(DIV1_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void div0lRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(DIV0_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void notlReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(NOT_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void orlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(OR_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void orlImm8r(int imm8, RegisterID dst)
+    {
+        ASSERT((imm8 <= 255) && (imm8 >= 0));
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+        uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8);
+        oneShortOp(opc);
+    }
+
+    void sublRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(SUB_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void subvlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(SUBV_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void xorlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(XOR_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void xorlImm8r(int imm8, RegisterID dst)
+    {
+        ASSERT((imm8 <= 255) && (imm8 >= 0));
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+        uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8);
+        oneShortOp(opc);
+    }
+
+    void shllImm8r(int imm, RegisterID dst)
+    {
+        switch (imm) {
+        case 1:
+            oneShortOp(getOpcodeGroup2(SHLL_OPCODE, dst));
+            break;
+        case 2:
+            oneShortOp(getOpcodeGroup2(SHLL2_OPCODE, dst));
+            break;
+        case 8:
+            oneShortOp(getOpcodeGroup2(SHLL8_OPCODE, dst));
+            break;
+        case 16:
+            oneShortOp(getOpcodeGroup2(SHLL16_OPCODE, dst));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void neg(RegisterID dst, RegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup1(NEG_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void shldRegReg(RegisterID dst, RegisterID rShift)
+    {
+        oneShortOp(getOpcodeGroup1(SHLD_OPCODE, dst, rShift));
+    }
+
+    void shadRegReg(RegisterID dst, RegisterID rShift)
+    {
+        oneShortOp(getOpcodeGroup1(SHAD_OPCODE, dst, rShift));
+    }
+
+    void shlrImm8r(int imm, RegisterID dst)
+    {
+        switch (imm) {
+        case 1:
+            oneShortOp(getOpcodeGroup2(SHLR_OPCODE, dst));
+            break;
+        case 2:
+            oneShortOp(getOpcodeGroup2(SHLR2_OPCODE, dst));
+            break;
+        case 8:
+            oneShortOp(getOpcodeGroup2(SHLR8_OPCODE, dst));
+            break;
+        case 16:
+            oneShortOp(getOpcodeGroup2(SHLR16_OPCODE, dst));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void shalImm8r(int imm, RegisterID dst)
+    {
+        switch (imm) {
+        case 1:
+            oneShortOp(getOpcodeGroup2(SHAL_OPCODE, dst));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void sharImm8r(int imm, RegisterID dst)
+    {
+        switch (imm) {
+        case 1:
+            oneShortOp(getOpcodeGroup2(SHAR_OPCODE, dst));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void imullRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MULL_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void dmullRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(DMULL_L_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void dmulslRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(DMULSL_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void stsmacl(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(STSMACL_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    void stsmach(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(STSMACH_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    // Comparisons
+
+    void cmplRegReg(RegisterID left, RegisterID right, Condition cond)
+    {
+        switch (cond) {
+        case NE:
+            oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+            break;
+        case GT:
+            oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, right, left));
+            break;
+        case EQ:
+            oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+            break;
+        case GE:
+            oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, right, left));
+            break;
+        case HS:
+            oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, right, left));
+            break;
+        case HI:
+            oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, right, left));
+            break;
+        case LI:
+            oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, left, right));
+            break;
+        case LS:
+            oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, left, right));
+            break;
+        case LE:
+            oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, left, right));
+            break;
+        case LT:
+            oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, left, right));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void cmppl(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(CMPPL_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    void cmppz(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(CMPPZ_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    void cmpEqImmR0(int imm, RegisterID dst)
+    {
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+        uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm);
+        oneShortOp(opc);
+    }
+
+    void testlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(TST_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void testlImm8r(int imm, RegisterID dst)
+    {
+        ASSERT((imm <= 255) && (imm >= 0));
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+        uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm);
+        oneShortOp(opc);
+    }
+
+    void nop()
+    {
+        oneShortOp(NOP_OPCODE, false);
+    }
+
+    void synco()
+    {
+        oneShortOp(SYNCO_OPCODE);
+    }
+
+    void sett()
+    {
+        oneShortOp(SETT_OPCODE);
+    }
+
+    void clrt()
+    {
+        oneShortOp(CLRT_OPCODE);
+    }
+
+    void fschg()
+    {
+        oneShortOp(FSCHG_OPCODE);
+    }
+
+    void bkpt()
+    {
+        oneShortOp(BRK_OPCODE, false);
+    }
+
+    void branch(uint16_t opc, int label)
+    {
+        switch (opc) {
+        case BT_OPCODE:
+            ASSERT((label <= 127) && (label >= -128));
+            oneShortOp(getOpcodeGroup5(BT_OPCODE, label));
+            break;
+        case BRA_OPCODE:
+            ASSERT((label <= 2047) && (label >= -2048));
+            oneShortOp(getOpcodeGroup6(BRA_OPCODE, label));
+            break;
+        case BF_OPCODE:
+            ASSERT((label <= 127) && (label >= -128));
+            oneShortOp(getOpcodeGroup5(BF_OPCODE, label));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void branch(uint16_t opc, RegisterID reg)
+    {
+        switch (opc) {
+        case BRAF_OPCODE:
+            oneShortOp(getOpcodeGroup2(BRAF_OPCODE, reg));
+            break;
+        case JMP_OPCODE:
+            oneShortOp(getOpcodeGroup2(JMP_OPCODE, reg));
+            break;
+        case JSR_OPCODE:
+            oneShortOp(getOpcodeGroup2(JSR_OPCODE, reg));
+            break;
+        case BSRF_OPCODE:
+            oneShortOp(getOpcodeGroup2(BSRF_OPCODE, reg));
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    void ldspr(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(LDSPR_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    void stspr(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(STSPR_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    void extub(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(EXTUB_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+    
+    void extuw(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(EXTUW_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    // float operations
+
+    void ldsrmfpul(RegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup2(LDS_RM_FPUL_OPCODE, src);
+        oneShortOp(opc);
+    }
+
+    void fneg(FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup2(FNEG_OPCODE, dst);
+        oneShortOp(opc, true, false);
+    }
+
+    void fsqrt(FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup2(FSQRT_OPCODE, dst);
+        oneShortOp(opc, true, false);
+    }
+
+    void stsfpulReg(RegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup2(STS_FPUL_RN_OPCODE, src);
+        oneShortOp(opc);
+    }
+
+    void floatfpulfrn(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup2(FLOAT_OPCODE, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmull(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMUL_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsRegReg(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOV_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsReadrm(RegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsWriterm(FPRegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsWriter0r(FPRegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_R0RN_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsReadr0r(RegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOVS_READ_R0RM_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsReadrminc(RegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_INC_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fmovsWriterndec(FPRegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_DEC_OPCODE, dst, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void ftrcRegfpul(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup2(FTRC_OPCODE, src);
+        oneShortOp(opc, true, false);
+    }
+
+    void fldsfpul(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup2(FLDS_FRM_FPUL_OPCODE, src);
+        oneShortOp(opc);
+    }
+
+    void fstsfpul(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup2(FSTS_FPUL_FRN_OPCODE, src);
+        oneShortOp(opc);
+    }
+
+    void ldsfpscr(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(LDSFPSCR_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    void stsfpscr(RegisterID reg)
+    {
+        uint16_t opc = getOpcodeGroup2(STSFPSCR_OPCODE, reg);
+        oneShortOp(opc);
+    }
+
+    // double operations
+
+    void dcnvds(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup7(FCNVDS_DRM_FPUL_OPCODE, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void dcnvsd(FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup7(FCNVSD_FPUL_DRN_OPCODE, dst >> 1);
+        oneShortOp(opc);
+    }
+
+    void dcmppeq(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FCMPEQ_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void dcmppgt(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FCMPGT_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void dmulRegReg(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FMUL_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void dsubRegReg(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FSUB_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void daddRegReg(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FADD_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void dmovRegReg(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FMOV_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void ddivRegReg(FPRegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup8(FDIV_OPCODE, dst >> 1, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void dabs(FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup7(FABS_OPCODE, dst >> 1);
+        oneShortOp(opc);
+    }
+
+    void dsqrt(FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup7(FSQRT_OPCODE, dst >> 1);
+        oneShortOp(opc);
+    }
+
+    void dneg(FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup7(FNEG_OPCODE, dst >> 1);
+        oneShortOp(opc);
+    }
+
+    void fmovReadrm(RegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_OPCODE, dst >> 1, src);
+        oneShortOp(opc);
+    }
+
+    void fmovWriterm(FPRegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_OPCODE, dst, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void fmovWriter0r(FPRegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_R0RN_OPCODE, dst, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void fmovReadr0r(RegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup10(FMOVS_READ_R0RM_OPCODE, dst >> 1, src);
+        oneShortOp(opc);
+    }
+
+    void fmovReadrminc(RegisterID src, FPRegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_INC_OPCODE, dst >> 1, src);
+        oneShortOp(opc);
+    }
+
+    void fmovWriterndec(FPRegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_DEC_OPCODE, dst, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void floatfpulDreg(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup7(FLOAT_OPCODE, src >> 1);
+        oneShortOp(opc);
+    }
+
+    void ftrcdrmfpul(FPRegisterID src)
+    {
+        uint16_t opc = getOpcodeGroup7(FTRC_OPCODE, src >> 1);
+        oneShortOp(opc);
+    }
+
+    // Various move ops
+
+    void movImm8(int imm8, RegisterID dst)
+    {
+        ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
+        oneShortOp(opc);
+    }
+
+    void movlRegReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOV_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movwRegMem(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVW_WRITE_RN_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movwMemReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVW_READ_RM_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movwMemRegIn(RegisterID base, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVW_READ_RMINC_OPCODE, dst, base);
+        oneShortOp(opc);
+    }
+
+    void movwPCReg(int offset, RegisterID base, RegisterID dst)
+    {
+        ASSERT_UNUSED(base, base == SH4Registers::pc);
+        ASSERT((offset <= 255) && (offset >= 0));
+
+        uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset);
+        oneShortOp(opc);
+    }
+
+    void movwMemReg(int offset, RegisterID base, RegisterID dst)
+    {
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+        uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset);
+        oneShortOp(opc);
+    }
+
+    void movwR0mr(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVW_READ_R0RM_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movwRegMemr0(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVW_WRITE_R0RN_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movlRegMem(RegisterID src, int offset, RegisterID base)
+    {
+        ASSERT((offset <= 15) && (offset >= 0));
+
+        if (!offset) {
+            oneShortOp(getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src));
+            return;
+        }
+
+        oneShortOp(getOpcodeGroup4(MOVL_WRITE_OFFRN_OPCODE, base, src, offset));
+    }
+
+    void movlRegMem(RegisterID src, RegisterID base)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src);
+        oneShortOp(opc);
+    }
+
+    void movlMemReg(int offset, RegisterID base, RegisterID dst)
+    {
+        if (base == SH4Registers::pc) {
+            ASSERT((offset <= 255) && (offset >= 0));
+            oneShortOp(getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, dst, offset));
+            return;
+        }
+
+        ASSERT((offset <= 15) && (offset >= 0));
+        if (!offset) {
+            oneShortOp(getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base));
+            return;
+        }
+
+        oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+    }
+
+    void movlMemRegCompact(int offset, RegisterID base, RegisterID dst)
+    {
+        oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+    }
+
+    void movbRegMem(RegisterID src, RegisterID base)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVB_WRITE_RN_OPCODE, base, src);
+        oneShortOp(opc);
+    }
+
+    void movbMemReg(int offset, RegisterID base, RegisterID dst)
+    {
+        ASSERT_UNUSED(dst, dst == SH4Registers::r0);
+
+        uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset);
+        oneShortOp(opc);
+    }
+
+    void movbR0mr(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVB_READ_R0RM_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movbMemReg(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVB_READ_RM_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movbMemRegIn(RegisterID base, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVB_READ_RMINC_OPCODE, dst, base);
+        oneShortOp(opc);
+    }
+
+    void movbRegMemr0(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVB_WRITE_R0RN_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movlMemReg(RegisterID base, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base);
+        oneShortOp(opc);
+    }
+
+    void movlMemRegIn(RegisterID base, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, dst, base);
+        oneShortOp(opc);
+    }
+
+    void movlR0mr(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVL_READ_R0RM_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void movlRegMemr0(RegisterID src, RegisterID dst)
+    {
+        uint16_t opc = getOpcodeGroup1(MOVL_WRITE_R0RN_OPCODE, dst, src);
+        oneShortOp(opc);
+    }
+
+    void loadConstant(uint32_t constant, RegisterID dst)
+    {
+        if (((int)constant <= 0x7f) && ((int)constant >= -0x80)) {
+            movImm8(constant, dst);
+            return;
+        }
+
+        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+        m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+        printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+        m_buffer.putShortWithConstantInt(opc, constant, true);
+    }
+
+    void loadConstantUnReusable(uint32_t constant, RegisterID dst, bool ensureSpace = false)
+    {
+        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+        if (ensureSpace)
+            m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+
+        printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+        m_buffer.putShortWithConstantInt(opc, constant);
+    }
+
+    // Flow control
+
+    AssemblerLabel call()
+    {
+        RegisterID scr = claimScratch();
+        m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+        loadConstantUnReusable(0x0, scr);
+        branch(JSR_OPCODE, scr);
+        nop();
+        releaseScratch(scr);
+        return m_buffer.label();
+    }
+
+    AssemblerLabel call(RegisterID dst)
+    {
+        m_buffer.ensureSpace(maxInstructionSize + 2);
+        branch(JSR_OPCODE, dst);
+        nop();
+        return m_buffer.label();
+    }
+
+    AssemblerLabel jmp()
+    {
+        RegisterID scr = claimScratch();
+        m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+        loadConstantUnReusable(0x0, scr);
+        branch(BRAF_OPCODE, scr);
+        nop();
+        releaseScratch(scr);
+        return m_buffer.label();
+    }
+
+    AssemblerLabel extraInstrForBranch(RegisterID dst)
+    {
+        loadConstantUnReusable(0x0, dst);
+        branch(BRAF_OPCODE, dst);
+        nop();
+        return m_buffer.label();
+    }
+
+    AssemblerLabel jmp(RegisterID dst)
+    {
+        jmpReg(dst);
+        return m_buffer.label();
+    }
+
+    void jmpReg(RegisterID dst)
+    {
+        m_buffer.ensureSpace(maxInstructionSize + 2);
+        branch(JMP_OPCODE, dst);
+        nop();
+    }
+
+    AssemblerLabel jne()
+    {
+        branch(BF_OPCODE, 0);
+        return m_buffer.label();
+    }
+
+    AssemblerLabel je()
+    {
+        branch(BT_OPCODE, 0);
+        return m_buffer.label();
+    }
+
+    AssemblerLabel bra()
+    {
+        branch(BRA_OPCODE, 0);
+        return m_buffer.label();
+    }
+
+    void ret()
+    {
+        m_buffer.ensureSpace(maxInstructionSize + 2);
+        oneShortOp(RTS_OPCODE, false);
+    }
+
+    AssemblerLabel labelIgnoringWatchpoints()
+    {
+        m_buffer.ensureSpaceForAnyInstruction();
+        return m_buffer.label();
+    }
+
+    AssemblerLabel labelForWatchpoint()
+    {
+        m_buffer.ensureSpaceForAnyInstruction();
+        AssemblerLabel result = m_buffer.label();
+        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+            result = label();
+        m_indexOfLastWatchpoint = result.m_offset;
+        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+        return result;
+    }
+
+    AssemblerLabel label()
+    {
+        AssemblerLabel result = labelIgnoringWatchpoints();
+        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+            nop();
+            result = labelIgnoringWatchpoints();
+        }
+        return result;
+    }
+
+    int sizeOfConstantPool()
+    {
+        return m_buffer.sizeOfConstantPool();
+    }
+
+    AssemblerLabel align(int alignment)
+    {
+        m_buffer.ensureSpace(maxInstructionSize + 2);
+        while (!m_buffer.isAligned(alignment)) {
+            nop();
+            m_buffer.ensureSpace(maxInstructionSize + 2);
+        }
+        return label();
+    }
+
+    static void changePCrelativeAddress(int offset, uint16_t* instructionPtr, uint32_t newAddress)
+    {
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) &(~0x3));
+        *reinterpret_cast<uint32_t*>(address) = newAddress;
+    }
+
+    static uint32_t readPCrelativeAddress(int offset, uint16_t* instructionPtr)
+    {
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) &(~0x3));
+        return *reinterpret_cast<uint32_t*>(address);
+    }
+
+    static uint16_t* getInstructionPtr(void* code, int offset)
+    {
+        return reinterpret_cast<uint16_t*>(reinterpret_cast<uint32_t>(code) + offset);
+    }
+
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+
+        uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset) - 3;
+        int offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(code)) - from.m_offset;
+
+        /* MOV #imm, reg => LDR reg
+           braf @reg        braf @reg
+           nop              nop
+        */
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+        printInstr(*instructionPtr, from.m_offset + 2);
+    }
+
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
+        instructionPtr -= 3;
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+    }
+
+    static void linkPointer(void* code, AssemblerLabel where, void* value)
+    {
+        uint16_t* instructionPtr = getInstructionPtr(code, where.m_offset);
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(value));
+    }
+
+    static unsigned getCallReturnOffset(AssemblerLabel call)
+    {
+        ASSERT(call.isSet());
+        return call.m_offset;
+    }
+
+    static uint32_t* getLdrImmAddressOnPool(SH4Word* insn, uint32_t* constPool)
+    {
+        return (constPool + (*insn & 0xff));
+    }
+
+    static SH4Word patchConstantPoolLoad(SH4Word load, int value)
+    {
+        return ((load & ~0xff) | value);
+    }
+
+    static SH4Buffer::TwoShorts placeConstantPoolBarrier(int offset)
+    {
+        ASSERT(((offset >> 1) <= 2047) && ((offset >> 1) >= -2048));
+
+        SH4Buffer::TwoShorts m_barrier;
+        m_barrier.high = (BRA_OPCODE | (offset >> 1));
+        m_barrier.low = NOP_OPCODE;
+        printInstr(((BRA_OPCODE | (offset >> 1))), 0);
+        printInstr(NOP_OPCODE, 0);
+        return m_barrier;
+    }
+
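+    // Rewrites a "mov #index, Rn" placeholder (MOVIMM_OPCODE, pool slot index in the low byte) into a
+    // PC-relative "mov.l @(disp, PC), Rn" (MOVL_READ_OFFPC_OPCODE) once the constant pool location is known,
+    // encoding disp as the word distance from the aligned PC + 4 to the pool slot.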
+    static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+    {
+        SH4Word* instructionPtr = reinterpret_cast<SH4Word*>(loadAddr);
+        SH4Word instruction = *instructionPtr;
+        SH4Word index = instruction & 0xff;
+
+        if ((instruction & 0xf000) != MOVIMM_OPCODE)
+            return;
+
+        ASSERT((((reinterpret_cast<uint32_t>(constPoolAddr) - reinterpret_cast<uint32_t>(loadAddr)) + index * 4)) < 1024);
+
+        int offset = reinterpret_cast<uint32_t>(constPoolAddr) + (index * 4) - ((reinterpret_cast<uint32_t>(instructionPtr) & ~0x03) + 4);
+        instruction &= 0x0f00;
+        instruction |= 0xd000;
+        offset &= 0x03ff;
+        instruction |= (offset >> 2);
+        *instructionPtr = instruction;
+        printInstr(instruction, reinterpret_cast<uint32_t>(loadAddr));
+    }
+
+    static void repatchPointer(void* where, void* value)
+    {
+        patchPointer(where, value);
+    }
+
+    static void* readPointer(void* code)
+    {
+        return reinterpret_cast<void*>(readInt32(code));
+    }
+
+    static void repatchInt32(void* where, int32_t value)
+    {
+        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, value);
+    }
+
+    static void repatchCompact(void* where, int32_t value)
+    {
+        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
+        ASSERT(value >= 0);
+        ASSERT(value <= 60);
+
+        // Handle the uncommon case where a flushConstantPool occurred in movlMemRegCompact.
+        if ((instructionPtr[0] & 0xf000) == BRA_OPCODE)
+            instructionPtr += (instructionPtr[0] & 0x0fff) + 2;
+
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFRM_OPCODE);
+        instructionPtr[0] = (instructionPtr[0] & 0xfff0) | (value >> 2);
+        cacheFlush(instructionPtr, sizeof(uint16_t));
+    }
+
+    static void relinkCall(void* from, void* to)
+    {
+        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+        instructionPtr -= 3;
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast(to));
+    }
+
+    static void relinkJump(void* from, void* to)
+    {
+        uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+        instructionPtr -= 3;
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(from));
+    }
+
+    // Linking & patching
+
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return sizeof(SH4Word) * 6;
+    }
+
+    static void replaceWithJump(void *instructionStart, void *to)
+    {
+        SH4Word* instruction = reinterpret_cast<SH4Word*>(instructionStart);
+        intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + 3 * sizeof(SH4Word));
+
+        if ((instruction[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE) {
+            // We have an entry in constant pool and we potentially replace a branchPtrWithPatch, so let's backup what would be the
+            // condition (CMP/xx and Bx opcodes) for later use in revertJumpReplacementToBranchPtrWithPatch before putting the jump.
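+            // When this replaces a branchPtrWithPatch, the sequence goes from
+            //   [0] mov.l @(disp,PC),Rn  [1] cmp/xx  [2] bt/bf   to   [0] mov.l @(disp,PC),Rn  [1] braf Rn  [2] nop,
+            // with the original [1] and [2] saved in slots [4] and [5].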
+            instruction[4] = instruction[1];
+            instruction[5] = instruction[2];
+            instruction[1] = (BRAF_OPCODE | (instruction[0] & 0x0f00));
+            instruction[2] = NOP_OPCODE;
+            cacheFlush(&instruction[1], 2 * sizeof(SH4Word));
+        } else {
+            instruction[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, 1);
+            instruction[1] = getOpcodeGroup2(BRAF_OPCODE, SH4Registers::r13);
+            instruction[2] = NOP_OPCODE;
+            cacheFlush(instruction, 3 * sizeof(SH4Word));
+        }
+
+        changePCrelativeAddress(instruction[0] & 0x00ff, instruction, difference);
+    }
+
+    static void revertJumpReplacementToBranchPtrWithPatch(void* instructionStart, RegisterID rd, int imm)
+    {
+        SH4Word *insn = reinterpret_cast<SH4Word*>(instructionStart);
+        ASSERT((insn[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        ASSERT((insn[0] & 0x00ff) != 1);
+
+        insn[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, insn[0] & 0x00ff);
+        if ((insn[1] & 0xf0ff) == BRAF_OPCODE) {
+            insn[1] = (insn[4] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4); // Restore CMP/xx opcode.
+            insn[2] = insn[5];
+            ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE));
+            ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+            insn[4] = (BRAF_OPCODE | (insn[3] & 0x0f00));
+            insn[5] = NOP_OPCODE;
+            cacheFlush(insn, 6 * sizeof(SH4Word));
+        } else {
+            // The branchPtrWithPatch has already been restored, so we just patch the immediate value and ASSERT all is as expected.
+            ASSERT((insn[1] & 0xf000) == 0x3000);
+            insn[1] = (insn[1] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4);
+            cacheFlush(insn, 2 * sizeof(SH4Word));
+            ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE));
+            ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+            ASSERT(insn[5] == NOP_OPCODE);
+        }
+
+        changePCrelativeAddress(insn[0] & 0x00ff, insn, imm);
+    }
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+
+        uint16_t* instructionPtr = getInstructionPtr(data(), from.m_offset) - 1;
+        int offsetBits = (to.m_offset - from.m_offset);
+
+        if (type == JumpNear) {
+            uint16_t instruction = instructionPtr[0];
+            int offset = (offsetBits - 2);
+            ASSERT((((instruction == BT_OPCODE) || (instruction == BF_OPCODE)) && (offset >= -256) && (offset <= 254))
+                || ((instruction == BRA_OPCODE) && (offset >= -4096) && (offset <= 4094)));
+            *instructionPtr++ = instruction | (offset >> 1);
+            printInstr(*instructionPtr, from.m_offset + 2);
+            return;
+        }
+
+        /* MOV # imm, reg => LDR reg
+           braf @reg         braf @reg
+           nop               nop
+        */
+        instructionPtr -= 2;
+        ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE);
+
+        if ((instructionPtr[0] & 0xf000) == MOVIMM_OPCODE) {
+            uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
+            *addr = offsetBits;
+            printInstr(*instructionPtr, from.m_offset + 2);
+            return;
+        }
+
+        ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+        printInstr(*instructionPtr, from.m_offset + 2);
+    }
+
+    static void* getRelocatedAddress(void* code, AssemblerLabel label)
+    {
+        return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+    }
+
+    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+    {
+        return b.m_offset - a.m_offset;
+    }
+
+    static void patchPointer(void* code, AssemblerLabel where, void* value)
+    {
+        patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+    }
+
+    static void patchPointer(void* code, void* value)
+    {
+        patchInt32(code, reinterpret_cast<uint32_t>(value));
+    }
+
+    static void patchInt32(void* code, uint32_t value)
+    {
+        changePCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code), value);
+    }
+
+    static uint32_t readInt32(void* code)
+    {
+        return readPCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code));
+    }
+
+    static void* readCallTarget(void* from)
+    {
+        uint16_t* instructionPtr = static_cast<uint16_t*>(from);
+        instructionPtr -= 3;
+        return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
+    }
+
+    static void cacheFlush(void* code, size_t size)
+    {
+#if OS(LINUX)
+        // Flush each page separately, otherwise the whole flush will fail if an uncommitted page is in the area.
+        unsigned currentPage = reinterpret_cast<unsigned>(code) & ~(pageSize() - 1);
+        unsigned lastPage = (reinterpret_cast<unsigned>(code) + size - 1) & ~(pageSize() - 1);
+        do {
+#if defined CACHEFLUSH_D_L2
+            syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
+#else
+            syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I);
+#endif
+            currentPage += pageSize();
+        } while (lastPage >= currentPage);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+    }
+
+    void prefix(uint16_t pre)
+    {
+        m_buffer.putByte(pre);
+    }
+
+    void oneShortOp(uint16_t opcode, bool checksize = true, bool isDouble = true)
+    {
+        printInstr(opcode, m_buffer.codeSize(), isDouble);
+        if (checksize)
+            m_buffer.ensureSpace(maxInstructionSize);
+        m_buffer.putShortUnchecked(opcode);
+    }
+
+    void ensureSpace(int space)
+    {
+        m_buffer.ensureSpace(space);
+    }
+
+    void ensureSpace(int insnSpace, int constSpace)
+    {
+        m_buffer.ensureSpace(insnSpace, constSpace);
+    }
+
+    // Administrative methods
+
+    void* data() const { return m_buffer.data(); }
+    size_t codeSize() const { return m_buffer.codeSize(); }
+
+    unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+#ifdef SH4_ASSEMBLER_TRACING
+    static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true)
+    {
+        if (!getenv("JavaScriptCoreDumpJIT"))
+            return;
+
+        const char *format = 0;
+        printfStdoutInstr("offset: 0x%8.8x\t", size);
+        switch (opc) {
+        case BRK_OPCODE:
+            format = "    BRK\n";
+            break;
+        case NOP_OPCODE:
+            format = "    NOP\n";
+            break;
+        case RTS_OPCODE:
+            format = "    *RTS\n";
+            break;
+        case SETS_OPCODE:
+            format = "    SETS\n";
+            break;
+        case SETT_OPCODE:
+            format = "    SETT\n";
+            break;
+        case CLRT_OPCODE:
+            format = "    CLRT\n";
+            break;
+        case FSCHG_OPCODE:
+            format = "    FSCHG\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format);
+            return;
+        }
+        switch (opc & 0xf0ff) {
+        case BRAF_OPCODE:
+            format = "    *BRAF R%d\n";
+            break;
+        case DT_OPCODE:
+            format = "    DT R%d\n";
+            break;
+        case CMPPL_OPCODE:
+            format = "    CMP/PL R%d\n";
+            break;
+        case CMPPZ_OPCODE:
+            format = "    CMP/PZ R%d\n";
+            break;
+        case JMP_OPCODE:
+            format = "    *JMP @R%d\n";
+            break;
+        case JSR_OPCODE:
+            format = "    *JSR @R%d\n";
+            break;
+        case LDSPR_OPCODE:
+            format = "    LDS R%d, PR\n";
+            break;
+        case LDSLPR_OPCODE:
+            format = "    LDS.L @R%d+, PR\n";
+            break;
+        case MOVT_OPCODE:
+            format = "    MOVT R%d\n";
+            break;
+        case SHAL_OPCODE:
+            format = "    SHAL R%d\n";
+            break;
+        case SHAR_OPCODE:
+            format = "    SHAR R%d\n";
+            break;
+        case SHLL_OPCODE:
+            format = "    SHLL R%d\n";
+            break;
+        case SHLL2_OPCODE:
+            format = "    SHLL2 R%d\n";
+            break;
+        case SHLL8_OPCODE:
+            format = "    SHLL8 R%d\n";
+            break;
+        case SHLL16_OPCODE:
+            format = "    SHLL16 R%d\n";
+            break;
+        case SHLR_OPCODE:
+            format = "    SHLR R%d\n";
+            break;
+        case SHLR2_OPCODE:
+            format = "    SHLR2 R%d\n";
+            break;
+        case SHLR8_OPCODE:
+            format = "    SHLR8 R%d\n";
+            break;
+        case SHLR16_OPCODE:
+            format = "    SHLR16 R%d\n";
+            break;
+        case STSPR_OPCODE:
+            format = "    STS PR, R%d\n";
+            break;
+        case STSLPR_OPCODE:
+            format = "    STS.L PR, @-R%d\n";
+            break;
+        case LDS_RM_FPUL_OPCODE:
+            format = "    LDS R%d, FPUL\n";
+            break;
+        case STS_FPUL_RN_OPCODE:
+            format = "    STS FPUL, R%d \n";
+            break;
+        case FLDS_FRM_FPUL_OPCODE:
+            format = "    FLDS FR%d, FPUL\n";
+            break;
+        case FSTS_FPUL_FRN_OPCODE:
+            format = "    FSTS FPUL, FR%d \n";
+            break;
+        case LDSFPSCR_OPCODE:
+            format = "    LDS R%d, FPSCR \n";
+            break;
+        case STSFPSCR_OPCODE:
+            format = "    STS FPSCR, R%d \n";
+            break;
+        case STSMACL_OPCODE:
+            format = "    STS MACL, R%d \n";
+            break;
+        case STSMACH_OPCODE:
+            format = "    STS MACH, R%d \n";
+            break;
+        case BSRF_OPCODE:
+            format = "    *BSRF R%d\n";
+            break;
+        case FTRC_OPCODE:
+            format = "    FTRC FR%d, FPUL\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format, getRn(opc));
+            return;
+        }
+        switch (opc & 0xf0ff) {
+        case FNEG_OPCODE:
+            format = "    FNEG DR%d\n";
+            break;
+        case FLOAT_OPCODE:
+            format = "    FLOAT DR%d\n";
+            break;
+        case FTRC_OPCODE:
+            format = "    FTRC FR%d, FPUL\n";
+            break;
+        case FABS_OPCODE:
+            format = "    FABS FR%d\n";
+            break;
+        case FSQRT_OPCODE:
+            format = "    FSQRT FR%d\n";
+            break;
+        case FCNVDS_DRM_FPUL_OPCODE:
+            format = "    FCNVDS FR%d, FPUL\n";
+            break;
+        case FCNVSD_FPUL_DRN_OPCODE:
+            format = "    FCNVSD FPUL, FR%d\n";
+            break;
+        }
+        if (format) {
+            if (isdoubleInst)
+                printfStdoutInstr(format, getDRn(opc) << 1);
+            else
+                printfStdoutInstr(format, getRn(opc));
+            return;
+        }
+        switch (opc & 0xf00f) {
+        case ADD_OPCODE:
+            format = "    ADD R%d, R%d\n";
+            break;
+        case ADDC_OPCODE:
+            format = "    ADDC R%d, R%d\n";
+            break;
+        case ADDV_OPCODE:
+            format = "    ADDV R%d, R%d\n";
+            break;
+        case AND_OPCODE:
+            format = "    AND R%d, R%d\n";
+            break;
+        case DIV1_OPCODE:
+            format = "    DIV1 R%d, R%d\n";
+            break;
+        case CMPEQ_OPCODE:
+            format = "    CMP/EQ R%d, R%d\n";
+            break;
+        case CMPGE_OPCODE:
+            format = "    CMP/GE R%d, R%d\n";
+            break;
+        case CMPGT_OPCODE:
+            format = "    CMP/GT R%d, R%d\n";
+            break;
+        case CMPHI_OPCODE:
+            format = "    CMP/HI R%d, R%d\n";
+            break;
+        case CMPHS_OPCODE:
+            format = "    CMP/HS R%d, R%d\n";
+            break;
+        case MOV_OPCODE:
+            format = "    MOV R%d, R%d\n";
+            break;
+        case MOVB_WRITE_RN_OPCODE:
+            format = "    MOV.B R%d, @R%d\n";
+            break;
+        case MOVB_WRITE_RNDEC_OPCODE:
+            format = "    MOV.B R%d, @-R%d\n";
+            break;
+        case MOVB_WRITE_R0RN_OPCODE:
+            format = "    MOV.B R%d, @(R0, R%d)\n";
+            break;
+        case MOVB_READ_RM_OPCODE:
+            format = "    MOV.B @R%d, R%d\n";
+            break;
+        case MOVB_READ_RMINC_OPCODE:
+            format = "    MOV.B @R%d+, R%d\n";
+            break;
+        case MOVB_READ_R0RM_OPCODE:
+            format = "    MOV.B @(R0, R%d), R%d\n";
+            break;
+        case MOVL_WRITE_RN_OPCODE:
+            format = "    MOV.L R%d, @R%d\n";
+            break;
+        case MOVL_WRITE_RNDEC_OPCODE:
+            format = "    MOV.L R%d, @-R%d\n";
+            break;
+        case MOVL_WRITE_R0RN_OPCODE:
+            format = "    MOV.L R%d, @(R0, R%d)\n";
+            break;
+        case MOVL_READ_RM_OPCODE:
+            format = "    MOV.L @R%d, R%d\n";
+            break;
+        case MOVL_READ_RMINC_OPCODE:
+            format = "    MOV.L @R%d+, R%d\n";
+            break;
+        case MOVL_READ_R0RM_OPCODE:
+            format = "    MOV.L @(R0, R%d), R%d\n";
+            break;
+        case MULL_OPCODE:
+            format = "    MUL.L R%d, R%d\n";
+            break;
+        case DMULL_L_OPCODE:
+            format = "    DMULU.L R%d, R%d\n";
+            break;
+        case DMULSL_OPCODE:
+            format = "    DMULS.L R%d, R%d\n";
+            break;
+        case NEG_OPCODE:
+            format = "    NEG R%d, R%d\n";
+            break;
+        case NEGC_OPCODE:
+            format = "    NEGC R%d, R%d\n";
+            break;
+        case NOT_OPCODE:
+            format = "    NOT R%d, R%d\n";
+            break;
+        case OR_OPCODE:
+            format = "    OR R%d, R%d\n";
+            break;
+        case SHAD_OPCODE:
+            format = "    SHAD R%d, R%d\n";
+            break;
+        case SHLD_OPCODE:
+            format = "    SHLD R%d, R%d\n";
+            break;
+        case SUB_OPCODE:
+            format = "    SUB R%d, R%d\n";
+            break;
+        case SUBC_OPCODE:
+            format = "    SUBC R%d, R%d\n";
+            break;
+        case SUBV_OPCODE:
+            format = "    SUBV R%d, R%d\n";
+            break;
+        case TST_OPCODE:
+            format = "    TST R%d, R%d\n";
+            break;
+        case XOR_OPCODE:
+            format = "    XOR R%d, R%d\n";
+            break;
+        case MOVW_WRITE_RN_OPCODE:
+            format = "    MOV.W R%d, @R%d\n";
+            break;
+        case MOVW_READ_RM_OPCODE:
+            format = "    MOV.W @R%d, R%d\n";
+            break;
+        case MOVW_READ_RMINC_OPCODE:
+            format = "    MOV.W @R%d+, R%d\n";
+            break;
+        case MOVW_READ_R0RM_OPCODE:
+            format = "    MOV.W @(R0, R%d), R%d\n";
+            break;
+        case MOVW_WRITE_R0RN_OPCODE:
+            format = "    MOV.W R%d, @(R0, R%d)\n";
+            break;
+        case EXTUB_OPCODE:
+            format = "    EXTU.B R%d, R%d\n";
+            break;
+        case EXTUW_OPCODE:
+            format = "    EXTU.W R%d, R%d\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format, getRm(opc), getRn(opc));
+            return;
+        }
+        switch (opc & 0xf00f) {
+        case FSUB_OPCODE:
+            format = "    FSUB FR%d, FR%d\n";
+            break;
+        case FADD_OPCODE:
+            format = "    FADD FR%d, FR%d\n";
+            break;
+        case FDIV_OPCODE:
+            format = "    FDIV FR%d, FR%d\n";
+            break;
+        case FMUL_OPCODE:
+            format = "    FMUL FR%d, FR%d\n";
+            break;
+        case FMOV_OPCODE:
+            format = "    FMOV FR%d, FR%d\n";
+            break;
+        case FCMPEQ_OPCODE:
+            format = "    FCMP/EQ FR%d, FR%d\n";
+            break;
+        case FCMPGT_OPCODE:
+            format = "    FCMP/GT FR%d, FR%d\n";
+            break;
+        }
+        if (format) {
+            if (isdoubleInst)
+                printfStdoutInstr(format, getDRm(opc) << 1, getDRn(opc) << 1);
+            else
+                printfStdoutInstr(format, getRm(opc), getRn(opc));
+            return;
+        }
+        switch (opc & 0xf00f) {
+        case FMOVS_WRITE_RN_DEC_OPCODE:
+            format = "    %s FR%d, @-R%d\n";
+            break;
+        case FMOVS_WRITE_RN_OPCODE:
+            format = "    %s FR%d, @R%d\n";
+            break;
+        case FMOVS_WRITE_R0RN_OPCODE:
+            format = "    %s FR%d, @(R0, R%d)\n";
+            break;
+        }
+        if (format) {
+            if (isdoubleInst)
+                printfStdoutInstr(format, "FMOV", getDRm(opc) << 1, getDRn(opc));
+            else
+                printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+            return;
+        }
+        switch (opc & 0xf00f) {
+        case FMOVS_READ_RM_OPCODE:
+            format = "    %s @R%d, FR%d\n";
+            break;
+        case FMOVS_READ_RM_INC_OPCODE:
+            format = "    %s @R%d+, FR%d\n";
+            break;
+        case FMOVS_READ_R0RM_OPCODE:
+            format = "    %s @(R0, R%d), FR%d\n";
+            break;
+        }
+        if (format) {
+            if (isdoubleInst)
+                printfStdoutInstr(format, "FMOV", getDRm(opc), getDRn(opc) << 1);
+            else
+                printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+            return;
+        }
+        switch (opc & 0xff00) {
+        case BF_OPCODE:
+            format = "    BF %d\n";
+            break;
+        case BFS_OPCODE:
+            format = "    *BF/S %d\n";
+            break;
+        case ANDIMM_OPCODE:
+            format = "    AND #%d, R0\n";
+            break;
+        case BT_OPCODE:
+            format = "    BT %d\n";
+            break;
+        case BTS_OPCODE:
+            format = "    *BT/S %d\n";
+            break;
+        case CMPEQIMM_OPCODE:
+            format = "    CMP/EQ #%d, R0\n";
+            break;
+        case MOVB_WRITE_OFFGBR_OPCODE:
+            format = "    MOV.B R0, @(%d, GBR)\n";
+            break;
+        case MOVB_READ_OFFGBR_OPCODE:
+            format = "    MOV.B @(%d, GBR), R0\n";
+            break;
+        case MOVL_WRITE_OFFGBR_OPCODE:
+            format = "    MOV.L R0, @(%d, GBR)\n";
+            break;
+        case MOVL_READ_OFFGBR_OPCODE:
+            format = "    MOV.L @(%d, GBR), R0\n";
+            break;
+        case MOVA_READ_OFFPC_OPCODE:
+            format = "    MOVA @(%d, PC), R0\n";
+            break;
+        case ORIMM_OPCODE:
+            format = "    OR #%d, R0\n";
+            break;
+        case ORBIMM_OPCODE:
+            format = "    OR.B #%d, @(R0, GBR)\n";
+            break;
+        case TSTIMM_OPCODE:
+            format = "    TST #%d, R0\n";
+            break;
+        case TSTB_OPCODE:
+            format = "    TST.B %d, @(R0, GBR)\n";
+            break;
+        case XORIMM_OPCODE:
+            format = "    XOR #%d, R0\n";
+            break;
+        case XORB_OPCODE:
+            format = "    XOR.B %d, @(R0, GBR)\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format, getImm8(opc));
+            return;
+        }
+        switch (opc & 0xff00) {
+        case MOVB_WRITE_OFFRN_OPCODE:
+            format = "    MOV.B R0, @(%d, R%d)\n";
+            break;
+        case MOVB_READ_OFFRM_OPCODE:
+            format = "    MOV.B @(%d, R%d), R0\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format, getDisp(opc), getRm(opc));
+            return;
+        }
+        switch (opc & 0xf000) {
+        case BRA_OPCODE:
+            format = "    *BRA %d\n";
+            break;
+        case BSR_OPCODE:
+            format = "    *BSR %d\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format, getImm12(opc));
+            return;
+        }
+        switch (opc & 0xf000) {
+        case MOVL_READ_OFFPC_OPCODE:
+            format = "    MOV.L @(%d, PC), R%d\n";
+            break;
+        case ADDIMM_OPCODE:
+            format = "    ADD #%d, R%d\n";
+            break;
+        case MOVIMM_OPCODE:
+            format = "    MOV #%d, R%d\n";
+            break;
+        case MOVW_READ_OFFPC_OPCODE:
+            format = "    MOV.W @(%d, PC), R%d\n";
+            break;
+        }
+        if (format) {
+            printfStdoutInstr(format, getImm8(opc), getRn(opc));
+            return;
+        }
+        switch (opc & 0xf000) {
+        case MOVL_WRITE_OFFRN_OPCODE:
+            format = "    MOV.L R%d, @(%d, R%d)\n";
+            printfStdoutInstr(format, getRm(opc), getDisp(opc), getRn(opc));
+            break;
+        case MOVL_READ_OFFRM_OPCODE:
+            format = "    MOV.L @(%d, R%d), R%d\n";
+            printfStdoutInstr(format, getDisp(opc), getRm(opc), getRn(opc));
+            break;
+        }
+    }
+
+    static void printfStdoutInstr(const char* format, ...)
+    {
+        if (getenv("JavaScriptCoreDumpJIT")) {
+            va_list args;
+            va_start(args, format);
+            vprintfStdoutInstr(format, args);
+            va_end(args);
+        }
+    }
+
+    static void vprintfStdoutInstr(const char* format, va_list args)
+    {
+        if (getenv("JavaScriptCoreDumpJIT"))
+            WTF::dataLogFV(format, args);
+    }
+
+    static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr)
+    {
+        printfStdoutInstr(">> repatch instructions after link\n");
+        for (int i = 0; i <= nbInstr; i++)
+            printInstr(*(first + i), offset + i);
+        printfStdoutInstr(">> end repatch\n");
+    }
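+
+    // Usage sketch (illustrative, not part of the original tracing code): when this file is
+    // built with SH4_ASSEMBLER_TRACING defined, exporting the JavaScriptCoreDumpJIT environment
+    // variable with any value, e.g.
+    //     JavaScriptCoreDumpJIT=1 ./jsc script.js
+    // makes printInstr()/printBlockInstr() above dump every emitted and repatched instruction
+    // via dataLogFV().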
+#else
+    static void printInstr(uint16_t, unsigned, bool = true) { };
+    static void printBlockInstr(uint16_t*, unsigned, int) { };
+#endif
+
+    static void replaceWithLoad(void* instructionStart)
+    {
+        SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+        insPtr += 2; // skip MOV and ADD opcodes
+
+        if (((*insPtr) & 0xf00f) != MOVL_READ_RM_OPCODE) {
+            *insPtr = MOVL_READ_RM_OPCODE | (*insPtr & 0x0ff0);
+            cacheFlush(insPtr, sizeof(SH4Word));
+        }
+    }
+
+    static void replaceWithAddressComputation(void* instructionStart)
+    {
+        SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+        insPtr += 2; // skip MOV and ADD opcodes
+
+        if (((*insPtr) & 0xf00f) != MOV_OPCODE) {
+            *insPtr = MOV_OPCODE | (*insPtr & 0x0ff0);
+            cacheFlush(insPtr, sizeof(SH4Word));
+        }
+    }
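+
+    // Illustrative sketch (assumed shape, inferred from the two helpers above): the patchable
+    // sequence is three SH4 words, e.g.
+    //     mov.l @(disp, PC), scratch   ; word 0: load the offset from the constant pool
+    //     add   base, scratch          ; word 1: compute the address
+    //     mov.l @scratch, dest         ; word 2, replaceWithLoad form (MOVL_READ_RM_OPCODE)
+    //     mov   scratch, dest          ; word 2, replaceWithAddressComputation form (MOV_OPCODE)
+    // Only the third word is rewritten; its register fields (bits 0x0ff0) are preserved.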
+
+private:
+    SH4Buffer m_buffer;
+    int m_claimscratchReg;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(SH4)
diff --git a/assembler/X86Assembler.h b/assembler/X86Assembler.h
new file mode 100644
index 0000000..1f3da42
--- /dev/null
+++ b/assembler/X86Assembler.h
@@ -0,0 +1,3797 @@
+/*
+ * Copyright (C) 2008, 2012-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
+
+#include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
+#include "JITCompilationEffort.h"
+#include <limits.h>
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
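+
+// For illustration: CAN_SIGN_EXTEND_8_32(127) and CAN_SIGN_EXTEND_8_32(-128) are true while
+// CAN_SIGN_EXTEND_8_32(128) is false, so the emitters below use the short imm8 instruction
+// forms only when the immediate survives a sign-extending round trip through a byte.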
+
+namespace X86Registers {
+
+#define FOR_EACH_CPU_REGISTER(V) \
+    FOR_EACH_CPU_GPREGISTER(V) \
+    FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    FOR_EACH_CPU_FPREGISTER(V)
+
+// The following are defined as pairs of the following values:
+// 1. type of the storage needed to save the register value by the JIT probe.
+// 2. name of the register.
+#define FOR_EACH_CPU_GPREGISTER(V) \
+    V(void*, eax) \
+    V(void*, ecx) \
+    V(void*, edx) \
+    V(void*, ebx) \
+    V(void*, esp) \
+    template<GroupOpcodeID op>
+    V(void*, esi) \
+    V(void*, edi) \
+    FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+    V(void*, eip) \
+    V(void*, eflags) \
+
+// Note: the JITs only store double values in the FP registers.
+#define FOR_EACH_CPU_FPREGISTER(V) \
+    V(double, xmm0) \
+    V(double, xmm1) \
+    V(double, xmm2) \
+    V(double, xmm3) \
+    V(double, xmm4) \
+    V(double, xmm5) \
+    V(double, xmm6) \
+    V(double, xmm7) \
+    FOR_EACH_X86_64_CPU_FPREGISTER(V)
+
+#if CPU(X86)
+
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
+
+#elif CPU(X86_64)
+
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+    V(void*, r8) \
+    V(void*, r9) \
+    V(void*, r10) \
+    V(void*, r11) \
+    V(void*, r12) \
+    V(void*, r13) \
+    V(void*, r14) \
+    V(void*, r15)
+
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
+    V(double, xmm8) \
+    V(double, xmm9) \
+    V(double, xmm10) \
+    V(double, xmm11) \
+    V(double, xmm12) \
+    V(double, xmm13) \
+    V(double, xmm14) \
+    V(double, xmm15)
+
+#endif // CPU(X86_64)
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+} RegisterID;
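+
+// For illustration, with X86_64 the X-macro above expands RegisterID to
+//     eax = 0, ecx = 1, edx = 2, ebx = 3, esp = 4, ebp = 5, esi = 6, edi = 7, r8 = 8, ..., r15 = 15,
+// so the enumerator values coincide with the hardware register numbers used in ModRM/SIB bytes.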
+
+typedef enum {
+    #define DECLARE_REGISTER(_type, _regName) _regName,
+    FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+    #undef DECLARE_REGISTER
+} XMMRegisterID;
+
+} // namespace X86Registers
+
+class X86Assembler {
+public:
+    typedef X86Registers::RegisterID RegisterID;
+    
+    static constexpr RegisterID firstRegister() { return X86Registers::eax; }
+    static constexpr RegisterID lastRegister()
+    {
+#if CPU(X86_64)
+        return X86Registers::r15;
+#else
+        return X86Registers::edi;
+#endif
+    }
+    
+    typedef X86Registers::XMMRegisterID XMMRegisterID;
+    typedef XMMRegisterID FPRegisterID;
+    
+    static constexpr FPRegisterID firstFPRegister() { return X86Registers::xmm0; }
+    static constexpr FPRegisterID lastFPRegister()
+    {
+#if CPU(X86_64)
+        return X86Registers::xmm15;
+#else
+        return X86Registers::xmm7;
+#endif
+    }
+
+    typedef enum {
+        ConditionO,
+        ConditionNO,
+        ConditionB,
+        ConditionAE,
+        ConditionE,
+        ConditionNE,
+        ConditionBE,
+        ConditionA,
+        ConditionS,
+        ConditionNS,
+        ConditionP,
+        ConditionNP,
+        ConditionL,
+        ConditionGE,
+        ConditionLE,
+        ConditionG,
+
+        ConditionC  = ConditionB,
+        ConditionNC = ConditionAE,
+    } Condition;
+
+private:
+    // OneByteOpcodeID defines the bytecodes for 1-byte instructions. It also contains the prefixes
+    // for 2-byte instructions.
+    // TwoByteOpcodeID and ThreeByteOpcodeID define the opcodes for the multi-byte instructions.
+    //
+    // The encoding for each instruction can be found in the Intel Architecture Manual in the appendix
+    // "Opcode Map."
+    //
+    // Each opcode can have a suffix describing the type of argument. The full list of suffixes is
+    // in the "Key to Abbreviations" section of the "Opcode Map".
+    // The most common argument types are:
+    //     -E: The argument is either a GPR or a memory address.
+    //     -G: The argument is a GPR.
+    //     -I: The argument is an immediate.
+    // The most common sizes are:
+    //     -v: 32 or 64bit depending on the operand-size attribute.
+    //     -z: 32bit in both 32bit and 64bit mode. Common for immediate values.
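+    //
+    // Worked example (for illustration): OP_ADD_EvGv (0x01) is "ADD Ev, Gv", adding a GPR into a
+    // register-or-memory operand. addl_rm(X86Registers::ecx, 8, X86Registers::ebx) therefore emits
+    // opcode 0x01, ModRM byte 0x4B (mod = 01 for disp8, reg = ecx, rm = ebx) and the displacement
+    // byte 0x08, i.e. "addl %ecx, 8(%ebx)".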
+    typedef enum {
+        OP_ADD_EbGb                     = 0x00,
+        OP_ADD_EvGv                     = 0x01,
+        OP_ADD_GvEv                     = 0x03,
+        OP_ADD_EAXIv                    = 0x05,
+        OP_OR_EvGv                      = 0x09,
+        OP_OR_GvEv                      = 0x0B,
+        OP_OR_EAXIv                     = 0x0D,
+        OP_2BYTE_ESCAPE                 = 0x0F,
+        OP_AND_EvGv                     = 0x21,
+        OP_AND_GvEv                     = 0x23,
+        OP_SUB_EvGv                     = 0x29,
+        OP_SUB_GvEv                     = 0x2B,
+        OP_SUB_EAXIv                    = 0x2D,
+        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
+        OP_XOR_EvGv                     = 0x31,
+        OP_XOR_GvEv                     = 0x33,
+        OP_XOR_EAXIv                    = 0x35,
+        OP_CMP_EvGv                     = 0x39,
+        OP_CMP_GvEv                     = 0x3B,
+        OP_CMP_EAXIv                    = 0x3D,
+#if CPU(X86_64)
+        PRE_REX                         = 0x40,
+#endif
+        OP_PUSH_EAX                     = 0x50,
+        OP_POP_EAX                      = 0x58,
+#if CPU(X86_64)
+        OP_MOVSXD_GvEv                  = 0x63,
+#endif
+        PRE_OPERAND_SIZE                = 0x66,
+        PRE_SSE_66                      = 0x66,
+        OP_PUSH_Iz                      = 0x68,
+        OP_IMUL_GvEvIz                  = 0x69,
+        OP_GROUP1_EbIb                  = 0x80,
+        OP_GROUP1_EvIz                  = 0x81,
+        OP_GROUP1_EvIb                  = 0x83,
+        OP_TEST_EbGb                    = 0x84,
+        OP_TEST_EvGv                    = 0x85,
+        OP_XCHG_EvGv                    = 0x87,
+        OP_MOV_EbGb                     = 0x88,
+        OP_MOV_EvGv                     = 0x89,
+        OP_MOV_GvEv                     = 0x8B,
+        OP_LEA                          = 0x8D,
+        OP_GROUP1A_Ev                   = 0x8F,
+        OP_NOP                          = 0x90,
+        OP_XCHG_EAX                     = 0x90,
+        OP_CDQ                          = 0x99,
+        OP_MOV_EAXOv                    = 0xA1,
+        OP_MOV_OvEAX                    = 0xA3,
+        OP_TEST_ALIb                    = 0xA8,
+        OP_TEST_EAXIv                   = 0xA9,
+        OP_MOV_EAXIv                    = 0xB8,
+        OP_GROUP2_EvIb                  = 0xC1,
+        OP_RET                          = 0xC3,
+        OP_GROUP11_EvIb                 = 0xC6,
+        OP_GROUP11_EvIz                 = 0xC7,
+        OP_INT3                         = 0xCC,
+        OP_GROUP2_Ev1                   = 0xD1,
+        OP_GROUP2_EvCL                  = 0xD3,
+        OP_ESCAPE_D9                    = 0xD9,
+        OP_ESCAPE_DD                    = 0xDD,
+        OP_CALL_rel32                   = 0xE8,
+        OP_JMP_rel32                    = 0xE9,
+        PRE_LOCK                        = 0xF0,
+        PRE_SSE_F2                      = 0xF2,
+        PRE_SSE_F3                      = 0xF3,
+        OP_HLT                          = 0xF4,
+        OP_GROUP3_EbIb                  = 0xF6,
+        OP_GROUP3_Ev                    = 0xF7,
+        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
+        OP_GROUP5_Ev                    = 0xFF,
+    } OneByteOpcodeID;
+
+    typedef enum {
+        OP2_UD2             = 0xB,
+        OP2_MOVSD_VsdWsd    = 0x10,
+        OP2_MOVSD_WsdVsd    = 0x11,
+        OP2_MOVSS_VsdWsd    = 0x10,
+        OP2_MOVSS_WsdVsd    = 0x11,
+        OP2_MOVAPD_VpdWpd   = 0x28,
+        OP2_MOVAPS_VpdWpd   = 0x28,
+        OP2_CVTSI2SD_VsdEd  = 0x2A,
+        OP2_CVTTSD2SI_GdWsd = 0x2C,
+        OP2_CVTTSS2SI_GdWsd = 0x2C,
+        OP2_UCOMISD_VsdWsd  = 0x2E,
+        OP2_3BYTE_ESCAPE_3A = 0x3A,
+        OP2_CMOVCC          = 0x40,
+        OP2_ADDSD_VsdWsd    = 0x58,
+        OP2_MULSD_VsdWsd    = 0x59,
+        OP2_CVTSD2SS_VsdWsd = 0x5A,
+        OP2_CVTSS2SD_VsdWsd = 0x5A,
+        OP2_SUBSD_VsdWsd    = 0x5C,
+        OP2_DIVSD_VsdWsd    = 0x5E,
+        OP2_MOVMSKPD_VdEd   = 0x50,
+        OP2_SQRTSD_VsdWsd   = 0x51,
+        OP2_ANDPS_VpdWpd    = 0x54,
+        OP2_ANDNPD_VpdWpd   = 0x55,
+        OP2_XORPD_VpdWpd    = 0x57,
+        OP2_MOVD_VdEd       = 0x6E,
+        OP2_MOVD_EdVd       = 0x7E,
+        OP2_JCC_rel32       = 0x80,
+        OP_SETCC            = 0x90,
+        OP2_3BYTE_ESCAPE_AE = 0xAE,
+        OP2_IMUL_GvEv       = 0xAF,
+        OP2_MOVZX_GvEb      = 0xB6,
+        OP2_BSF             = 0xBC,
+        OP2_TZCNT           = 0xBC,
+        OP2_BSR             = 0xBD,
+        OP2_LZCNT           = 0xBD,
+        OP2_MOVSX_GvEb      = 0xBE,
+        OP2_MOVZX_GvEw      = 0xB7,
+        OP2_MOVSX_GvEw      = 0xBF,
+        OP2_PEXTRW_GdUdIb   = 0xC5,
+        OP2_PSLLQ_UdqIb     = 0x73,
+        OP2_PSRLQ_UdqIb     = 0x73,
+        OP2_POR_VdqWdq      = 0xEB,
+    } TwoByteOpcodeID;
+    
+    typedef enum {
+        OP3_ROUNDSS_VssWssIb = 0x0A,
+        OP3_ROUNDSD_VsdWsdIb = 0x0B,
+        OP3_MFENCE           = 0xF0,
+    } ThreeByteOpcodeID;
+
+    struct VexPrefix {
+        enum : uint8_t {
+            TwoBytes = 0xC5,
+            ThreeBytes = 0xC4
+        };
+    };
+    enum class VexImpliedBytes : uint8_t {
+        TwoBytesOp = 1,
+        ThreeBytesOp38 = 2,
+        ThreeBytesOp3A = 3
+    };
+    
+    TwoByteOpcodeID cmovcc(Condition cond)
+    {
+        return (TwoByteOpcodeID)(OP2_CMOVCC + cond);
+    }
+
+    TwoByteOpcodeID jccRel32(Condition cond)
+    {
+        return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+    }
+
+    TwoByteOpcodeID setccOpcode(Condition cond)
+    {
+        return (TwoByteOpcodeID)(OP_SETCC + cond);
+    }
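+
+    // For illustration: the Condition enum above follows the hardware condition-code numbering,
+    // so jccRel32(ConditionE) yields 0x84 (the 0F 84 "JE rel32" opcode) and setccOpcode(ConditionNE)
+    // yields 0x95 (0F 95, "SETNE r/m8").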
+
+    typedef enum {
+        GROUP1_OP_ADD = 0,
+        GROUP1_OP_OR  = 1,
+        GROUP1_OP_ADC = 2,
+        GROUP1_OP_AND = 4,
+        GROUP1_OP_SUB = 5,
+        GROUP1_OP_XOR = 6,
+        GROUP1_OP_CMP = 7,
+
+        GROUP1A_OP_POP = 0,
+        
+        GROUP2_OP_ROL = 0,
+        GROUP2_OP_ROR = 1,
+        GROUP2_OP_RCL = 2,
+        GROUP2_OP_RCR = 3,
+        
+        GROUP2_OP_SHL = 4,
+        GROUP2_OP_SHR = 5,
+        GROUP2_OP_SAR = 7,
+
+        GROUP3_OP_TEST = 0,
+        GROUP3_OP_NOT  = 2,
+        GROUP3_OP_NEG  = 3,
+        GROUP3_OP_DIV = 6,
+        GROUP3_OP_IDIV = 7,
+
+        GROUP5_OP_CALLN = 2,
+        GROUP5_OP_JMPN  = 4,
+        GROUP5_OP_PUSH  = 6,
+
+        GROUP11_MOV = 0,
+
+        GROUP14_OP_PSLLQ = 6,
+        GROUP14_OP_PSRLQ = 2,
+
+        ESCAPE_D9_FSTP_singleReal = 3,
+        ESCAPE_DD_FSTP_doubleReal = 3,
+    } GroupOpcodeID;
+    
+    class X86InstructionFormatter;
+public:
+
+    X86Assembler()
+        : m_indexOfLastWatchpoint(INT_MIN)
+        , m_indexOfTailOfLastWatchpoint(INT_MIN)
+    {
+    }
+    
+    AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
+
+    // Stack operations:
+
+    void push_r(RegisterID reg)
+    {
+        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+    }
+
+    void pop_r(RegisterID reg)
+    {
+        m_formatter.oneByteOp(OP_POP_EAX, reg);
+    }
+
+    void push_i32(int imm)
+    {
+        m_formatter.oneByteOp(OP_PUSH_Iz);
+        m_formatter.immediate32(imm);
+    }
+
+    void push_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
+    }
+
+    void pop_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
+    }
+
+    // Arithmetic operations:
+
+#if !CPU(X86_64)
+    void adcl_im(int imm, const void* addr)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+            m_formatter.immediate32(imm);
+        }
+    }
+#endif
+
+    void addl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
+    }
+
+    void addl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
+    }
+    
+#if !CPU(X86_64)
+    void addl_mr(const void* addr, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
+    }
+#endif
+
+    void addl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+    }
+
+    void addl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, index, scale, offset);
+    }
+
+    void addb_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, offset);
+    }
+
+    void addb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, index, scale, offset);
+    }
+
+    void addw_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, offset);
+    }
+
+    void addw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, index, scale, offset);
+    }
+
+    void addl_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_ADD_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
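+
+    // Encoding example (illustrative): addl_ir(1, X86Registers::eax) emits 83 C0 01 (the
+    // sign-extended imm8 form), whereas addl_ir(1000, X86Registers::eax) falls back to the
+    // accumulator short form and emits 05 E8 03 00 00.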
+
+    void addl_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void addl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void addb_im(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, offset);
+        m_formatter.immediate8(imm);
+    }
+
+    void addb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, index, scale, offset);
+        m_formatter.immediate8(imm);
+    }
+
+    void addw_im(int imm, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate16(imm);
+        }
+    }
+
+    void addw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset);
+            m_formatter.immediate16(imm);
+        }
+    }
+
+#if CPU(X86_64)
+    void addq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
+    }
+
+    void addq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
+    }
+
+    void addq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_ADD_EvGv, src, base, offset);
+    }
+
+    void addq_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_ADD_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void addq_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+#else
+    void addl_im(int imm, const void* addr)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
+            m_formatter.immediate32(imm);
+        }
+    }
+#endif
+
+    void andl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
+    }
+
+    void andl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+    }
+
+    void andl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+    }
+
+    void andl_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void andl_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+#if CPU(X86_64)
+    void andq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
+    }
+
+    void andq_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+#else
+    void andl_im(int imm, const void* addr)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+            m_formatter.immediate32(imm);
+        }
+    }
+#endif
+
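+    // Note: GROUP1_OP_OR is reused below purely for its numeric value (1), which is the /1 (DEC)
+    // extension of the 0xFF group-5 opcode; GROUP1_OP_ADD (0) likewise stands in for the /0 (INC)
+    // extension in inc_r/incq_r below.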
+    void dec_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
+    }
+
+#if CPU(X86_64)
+    void decq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
+    }
+#endif // CPU(X86_64)
+
+    // Only used for testing purposes.
+    void illegalInstruction()
+    {
+        m_formatter.twoByteOp(OP2_UD2);
+    }
+
+    void inc_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
+    }
+
+#if CPU(X86_64)
+    void incq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
+    }
+
+    void incq_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset);
+    }
+#endif // CPU(X86_64)
+
+    void negl_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+    }
+
+#if CPU(X86_64)
+    void negq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+    }
+#endif
+
+    void negl_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+    }
+
+    void notl_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+    }
+
+    void notl_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+    }
+
+#if CPU(X86_64)
+    void notq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+    }
+
+    void notq_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+    }
+#endif
+
+    void orl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
+    }
+
+    void orl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
+    }
+
+    void orl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+    }
+
+    void orl_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_OR_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void orl_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+#if CPU(X86_64)
+    void orq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
+    }
+
+    void orq_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_OR_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+#else
+    void orl_im(int imm, const void* addr)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void orl_rm(RegisterID src, const void* addr)
+    {
+        m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
+    }
+#endif
+
+    void subl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
+    }
+
+    void subl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
+    }
+
+    void subl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+    }
+
+    void subl_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_SUB_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+    
+    void subl_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+#if CPU(X86_64)
+    void subq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
+    }
+
+    void subq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_SUB_GvEv, dst, base, offset);
+    }
+
+    void subq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_SUB_EvGv, src, base, offset);
+    }
+
+    void subq_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_SUB_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void subq_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+#else
+    void subl_im(int imm, const void* addr)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
+            m_formatter.immediate32(imm);
+        }
+    }
+#endif
+
+    void xorl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
+    }
+
+    void xorl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+    }
+
+    void xorl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+    }
+
+    void xorl_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void xorl_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_XOR_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+#if CPU(X86_64)
+    void xorq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
+    }
+
+    void xorq_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_XOR_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+    
+    void xorq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
+    }
+
+#endif
+
+    void lzcnt_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_LZCNT, dst, src);
+    }
+
+    void lzcnt_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_LZCNT, dst, base, offset);
+    }
+
+#if CPU(X86_64)
+    void lzcntq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_LZCNT, dst, src);
+    }
+
+    void lzcntq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_LZCNT, dst, base, offset);
+    }
+#endif
+
+    void bsr_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_BSR, dst, src);
+    }
+
+    void bsr_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_BSR, dst, base, offset);
+    }
+
+#if CPU(X86_64)
+    void bsrq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_BSR, dst, src);
+    }
+
+    void bsrq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_BSR, dst, base, offset);
+    }
+#endif
+
+    void tzcnt_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_TZCNT, dst, src);
+    }
+
+#if CPU(X86_64)
+    void tzcntq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_TZCNT, dst, src);
+    }
+#endif
+
+    void bsf_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_BSF, dst, src);
+    }
+
+#if CPU(X86_64)
+    void bsfq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_BSF, dst, src);
+    }
+#endif
+
+private:
+    template
+    void shiftInstruction32(int imm, RegisterID dst)
+    {
+        if (imm == 1)
+            m_formatter.oneByteOp(OP_GROUP2_Ev1, op, dst);
+        else {
+            m_formatter.oneByteOp(OP_GROUP2_EvIb, op, dst);
+            m_formatter.immediate8(imm);
+        }
+    }
+public:
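+
+    // For illustration: shll_i8r(1, X86Registers::eax) takes the imm == 1 path and emits D1 E0
+    // ("shll $1, %eax"), while shll_i8r(4, X86Registers::eax) emits C1 E0 04.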
+
+    void sarl_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_SAR>(imm, dst);
+    }
+
+    void sarl_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+    }
+    
+    void shrl_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_SHR>(imm, dst);
+    }
+    
+    void shrl_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+    }
+
+    void shll_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_SHL>(imm, dst);
+    }
+
+    void shll_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+    }
+
+    void rorl_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_ROR>(imm, dst);
+    }
+
+    void rorl_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst);
+    }
+
+    void roll_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction32<GROUP2_OP_ROL>(imm, dst);
+    }
+
+    void roll_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst);
+    }
+
+#if CPU(X86_64)
+private:
+    template<GroupOpcodeID op>
+    void shiftInstruction64(int imm, RegisterID dst)
+    {
+        if (imm == 1)
+            m_formatter.oneByteOp64(OP_GROUP2_Ev1, op, dst);
+        else {
+            m_formatter.oneByteOp64(OP_GROUP2_EvIb, op, dst);
+            m_formatter.immediate8(imm);
+        }
+    }
+public:
+    void sarq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+    }
+
+    void sarq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_SAR>(imm, dst);
+    }
+
+    void shrq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_SHR>(imm, dst);
+    }
+
+    void shrq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+    }
+
+    void shlq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_SHL>(imm, dst);
+    }
+
+    void shlq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+    }
+
+    void rorq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_ROR>(imm, dst);
+    }
+
+    void rorq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst);
+    }
+
+    void rolq_i8r(int imm, RegisterID dst)
+    {
+        shiftInstruction64<GROUP2_OP_ROL>(imm, dst);
+    }
+
+    void rolq_CLr(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst);
+    }
+#endif // CPU(X86_64)
+
+    void imull_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
+    }
+
+#if CPU(X86_64)
+    void imulq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(OP2_IMUL_GvEv, dst, src);
+    }
+#endif // CPU(X86_64)
+
+    void imull_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+    }
+
+    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
+        m_formatter.immediate32(value);
+    }
+
+    void divl_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_DIV, dst);
+    }
+
+    void idivl_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+    }
+
+#if CPU(X86_64)
+    void divq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_DIV, dst);
+    }
+
+    void idivq_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+    }
+#endif // CPU(X86_64)
+
+    // Comparisons:
+
+    void cmpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
+    }
+
+    void cmpl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
+    }
+
+    void cmpl_mr(int offset, RegisterID base, RegisterID src)
+    {
+        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
+    }
+
+    void cmpl_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp(OP_CMP_EAXIv);
+            else
+                m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
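+
+    // Illustrative note (editorial, not part of the upstream source): with the sign-extend
+    // check above, cmpl_ir(1, X86Registers::ecx) emits the short imm8 form 83 F9 01, while
+    // cmpl_ir(0x1000, X86Registers::eax) uses the dedicated CMP EAX, imm32 form
+    // 3D 00 10 00 00 rather than the generic 81 /7 id encoding.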
+
+    void cmpl_ir_force32(int imm, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+        m_formatter.immediate32(imm);
+    }
+    
+    void cmpl_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+    
+    void cmpb_im(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
+        m_formatter.immediate8(imm);
+    }
+    
+    void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
+        m_formatter.immediate8(imm);
+    }
+    
+#if CPU(X86)
+    void cmpb_im(int imm, const void* addr)
+    {
+        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
+        m_formatter.immediate8(imm);
+    }
+#endif
+
+    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void cmpl_im_force32(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+        m_formatter.immediate32(imm);
+    }
+
+#if CPU(X86_64)
+    void cmpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
+    }
+
+    void cmpq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
+    }
+
+    void cmpq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, index, scale, offset);
+    }
+
+    void cmpq_mr(int offset, RegisterID base, RegisterID src)
+    {
+        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
+    }
+
+    void cmpq_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            if (dst == X86Registers::eax)
+                m_formatter.oneByteOp64(OP_CMP_EAXIv);
+            else
+                m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void cmpq_im(int imm, int offset, RegisterID base)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+
+    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+            m_formatter.immediate32(imm);
+        }
+    }
+#else
+    void cmpl_rm(RegisterID reg, const void* addr)
+    {
+        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
+    }
+
+    void cmpl_im(int imm, const void* addr)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
+            m_formatter.immediate32(imm);
+        }
+    }
+#endif
+
+    void cmpw_ir(int imm, RegisterID dst)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.prefix(PRE_OPERAND_SIZE);
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.prefix(PRE_OPERAND_SIZE);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+            m_formatter.immediate16(imm);
+        }
+    }
+
+    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
+    }
+
+    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.prefix(PRE_OPERAND_SIZE);
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+            m_formatter.immediate8(imm);
+        } else {
+            m_formatter.prefix(PRE_OPERAND_SIZE);
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+            m_formatter.immediate16(imm);
+        }
+    }
+
+    void testl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+    }
+    
+    void testl_i32r(int imm, RegisterID dst)
+    {
+        if (dst == X86Registers::eax)
+            m_formatter.oneByteOp(OP_TEST_EAXIv);
+        else
+            m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+        m_formatter.immediate32(imm);
+    }
+
+    void testl_i32m(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+        m_formatter.immediate32(imm);
+    }
+
+    void testb_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
+    }
+
+    void testb_im(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
+        m_formatter.immediate8(imm);
+    }
+    
+    void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
+        m_formatter.immediate8(imm);
+    }
+
+#if CPU(X86)
+    void testb_im(int imm, const void* addr)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
+        m_formatter.immediate8(imm);
+    }
+#endif
+
+    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+        m_formatter.immediate32(imm);
+    }
+
+#if CPU(X86_64)
+    void testq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
+    }
+
+    void testq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
+    }
+
+    void testq_i32r(int imm, RegisterID dst)
+    {
+        if (dst == X86Registers::eax)
+            m_formatter.oneByteOp64(OP_TEST_EAXIv);
+        else
+            m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+        m_formatter.immediate32(imm);
+    }
+
+    void testq_i32m(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+        m_formatter.immediate32(imm);
+    }
+
+    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+        m_formatter.immediate32(imm);
+    }
+#endif 
+
+    void testw_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+    }
+    
+    void testb_i8r(int imm, RegisterID dst)
+    {
+        if (dst == X86Registers::eax)
+            m_formatter.oneByteOp(OP_TEST_ALIb);
+        else
+            m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+        m_formatter.immediate8(imm);
+    }
+
+    void setCC_r(Condition cond, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
+    }
+
+    void sete_r(RegisterID dst)
+    {
+        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
+    }
+
+    void setz_r(RegisterID dst)
+    {
+        sete_r(dst);
+    }
+
+    void setne_r(RegisterID dst)
+    {
+        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
+    }
+
+    void setnz_r(RegisterID dst)
+    {
+        setne_r(dst);
+    }
+
+    void setnp_r(RegisterID dst)
+    {
+        m_formatter.twoByteOp8(setccOpcode(ConditionNP), (GroupOpcodeID)0, dst);
+    }
+
+    void setp_r(RegisterID dst)
+    {
+        m_formatter.twoByteOp8(setccOpcode(ConditionP), (GroupOpcodeID)0, dst);
+    }
+
+    // Various move ops:
+
+    void cdq()
+    {
+        m_formatter.oneByteOp(OP_CDQ);
+    }
+
+#if CPU(X86_64)
+    void cqo()
+    {
+        m_formatter.oneByteOp64(OP_CDQ);
+    }
+#endif
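+
+    // Illustrative note (editorial, not part of the upstream source): cdq sign-extends EAX
+    // into EDX:EAX and cqo sign-extends RAX into RDX:RAX; callers typically emit one of
+    // these immediately before idivl_r / idivq_r so the dividend is widened correctly.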
+
+    void fstps(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_ESCAPE_D9, ESCAPE_D9_FSTP_singleReal, base, offset);
+    }
+
+    void fstpl(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+    }
+
+    void xchgl_rr(RegisterID src, RegisterID dst)
+    {
+        if (src == X86Registers::eax)
+            m_formatter.oneByteOp(OP_XCHG_EAX, dst);
+        else if (dst == X86Registers::eax)
+            m_formatter.oneByteOp(OP_XCHG_EAX, src);
+        else
+            m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+    }
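+
+    // Illustrative note (editorial, not part of the upstream source): the XCHG-with-EAX
+    // short form used above is a single byte, 90+rd, so xchgl_rr(X86Registers::edx,
+    // X86Registers::eax) emits just 0x92; the 0x90 encoding of xchg eax, eax doubles as
+    // the canonical one-byte NOP emitted by nop() further down.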
+
+    void xchgl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, offset);
+    }
+
+#if CPU(X86_64)
+    void xchgq_rr(RegisterID src, RegisterID dst)
+    {
+        if (src == X86Registers::eax)
+            m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
+        else if (dst == X86Registers::eax)
+            m_formatter.oneByteOp64(OP_XCHG_EAX, src);
+        else
+            m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+    }
+
+    void xchgq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, base, offset);
+    }
+#endif
+
+    void movl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+    }
+    
+    void movl_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
+    }
+
+    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+    }
+
+    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
+    }
+    
+    void movl_mEAX(const void* addr)
+    {
+        m_formatter.oneByteOp(OP_MOV_EAXOv);
+#if CPU(X86_64)
+        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+        m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+    }
+
+    void movl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
+    }
+
+    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+    }
+    
+    void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
+    }
+
+    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
+    }
+
+    void movl_i32r(int imm, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+        m_formatter.immediate32(imm);
+    }
+
+    void movl_i32m(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+        m_formatter.immediate32(imm);
+    }
+    
+    void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
+        m_formatter.immediate32(imm);
+    }
+
+#if !CPU(X86_64)
+    void movb_i8m(int imm, const void* addr)
+    {
+        ASSERT(-128 <= imm && imm < 128);
+        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
+        m_formatter.immediate8(imm);
+    }
+#endif
+
+    void movb_i8m(int imm, int offset, RegisterID base)
+    {
+        ASSERT(-128 <= imm && imm < 128);
+        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
+        m_formatter.immediate8(imm);
+    }
+
+    void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        ASSERT(-128 <= imm && imm < 128);
+        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
+        m_formatter.immediate8(imm);
+    }
+
+#if !CPU(X86_64)
+    void movb_rm(RegisterID src, const void* addr)
+    {
+        m_formatter.oneByteOp(OP_MOV_EbGb, src, addr);
+    }
+#endif
+    
+    void movb_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, offset);
+    }
+    
+    void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
+    }
+
+    void movw_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+
+        // FIXME: We often use oneByteOp8 for 16-bit operations. It's not clear that this is
+        // necessary. https://bugs.webkit.org/show_bug.cgi?id=153433
+        m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, offset);
+    }
+
+    void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
+    }
+
+    void movl_EAXm(const void* addr)
+    {
+        m_formatter.oneByteOp(OP_MOV_OvEAX);
+#if CPU(X86_64)
+        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+        m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+    }
+
+#if CPU(X86_64)
+    void movq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
+    }
+
+    void movq_rm(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
+    }
+
+    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
+    }
+
+    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
+    }
+
+    void movq_mEAX(const void* addr)
+    {
+        m_formatter.oneByteOp64(OP_MOV_EAXOv);
+        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+    }
+
+    void movq_EAXm(const void* addr)
+    {
+        m_formatter.oneByteOp64(OP_MOV_OvEAX);
+        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+    }
+
+    void movq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
+    }
+
+    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
+    }
+
+    void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
+    }
+
+    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
+    }
+
+    void movq_i32m(int imm, int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+        m_formatter.immediate32(imm);
+    }
+
+    void movq_i64r(int64_t imm, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+        m_formatter.immediate64(imm);
+    }
+
+    void mov_i32r(int32_t imm, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, dst);
+        m_formatter.immediate32(imm);
+    }
+    
+    void movsxd_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
+    }
+    
+    
+#else
+    void movl_rm(RegisterID src, const void* addr)
+    {
+        if (src == X86Registers::eax)
+            movl_EAXm(addr);
+        else 
+            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+    }
+    
+    void movl_mr(const void* addr, RegisterID dst)
+    {
+        if (dst == X86Registers::eax)
+            movl_mEAX(addr);
+        else
+            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
+    }
+
+    void movl_i32m(int imm, const void* addr)
+    {
+        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
+        m_formatter.immediate32(imm);
+    }
+#endif
+
+    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
+    }
+
+    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
+    }
+
+    void movswl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
+    }
+
+    void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
+    }
+
+    void movzbl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
+    }
+    
+    void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
+    }
+
+#if !CPU(X86_64)
+    void movzbl_mr(const void* address, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, address);
+    }
+#endif
+
+    void movsbl_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
+    }
+    
+    void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
+    }
+
+    void movzbl_rr(RegisterID src, RegisterID dst)
+    {
+        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
+        // is in the range ESP-EDI, and the src would not have required a REX).  Unneeded
+        // REX prefixes are defined to be silently ignored by the processor.
+        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
+    }
+
+    void movsbl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(OP2_MOVSX_GvEb, dst, src);
+    }
+
+    void movzwl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(OP2_MOVZX_GvEw, dst, src);
+    }
+
+    void movswl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp8(OP2_MOVSX_GvEw, dst, src);
+    }
+
+    void cmovl_rr(Condition cond, RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, src);
+    }
+
+    void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, base, offset);
+    }
+
+    void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, base, index, scale, offset);
+    }
+
+    void cmovel_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionE), dst, src);
+    }
+    
+    void cmovnel_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionNE), dst, src);
+    }
+    
+    void cmovpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionP), dst, src);
+    }
+    
+    void cmovnpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionNP), dst, src);
+    }
+
+#if CPU(X86_64)
+    void cmovq_rr(Condition cond, RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, src);
+    }
+
+    void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, base, offset);
+    }
+
+    void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, base, index, scale, offset);
+    }
+
+    void cmoveq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionE), dst, src);
+    }
+
+    void cmovneq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionNE), dst, src);
+    }
+
+    void cmovpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionP), dst, src);
+    }
+
+    void cmovnpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionNP), dst, src);
+    }
+#else
+    void cmovl_mr(Condition cond, const void* addr, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, addr);
+    }
+#endif
+
+    void leal_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
+    }
+
+    void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_LEA, dst, base, index, scale, offset);
+    }
+
+#if CPU(X86_64)
+    void leaq_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+    }
+
+    void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.oneByteOp64(OP_LEA, dst, base, index, scale, offset);
+    }
+#endif
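+
+    // Illustrative note (editorial, not part of the upstream source): LEA only computes the
+    // effective address of its memory operand and never accesses memory, so the helpers
+    // above double as three-operand arithmetic; leaq_mr(8, base, index, 2, dst), for
+    // instance, yields dst = base + index * 4 + 8 (the scale argument is the SIB shift).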
+
+    // Flow control:
+
+    AssemblerLabel call()
+    {
+        m_formatter.oneByteOp(OP_CALL_rel32);
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel call(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
+        return m_formatter.label();
+    }
+    
+    void call_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+    }
+
+    AssemblerLabel jmp()
+    {
+        m_formatter.oneByteOp(OP_JMP_rel32);
+        return m_formatter.immediateRel32();
+    }
+    
+    // Return an AssemblerLabel so we have a label to the jump, so we can use this
+    // to make a tail-recursive call on x86-64.  The MacroAssembler
+    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
+    AssemblerLabel jmp_r(RegisterID dst)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+        return m_formatter.label();
+    }
+    
+    void jmp_m(int offset, RegisterID base)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
+    }
+    
+    void jmp_m(int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, index, scale, offset);
+    }
+    
+#if !CPU(X86_64)
+    void jmp_m(const void* address)
+    {
+        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
+    }
+#endif
+
+    AssemblerLabel jne()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionNE));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jnz()
+    {
+        return jne();
+    }
+
+    AssemblerLabel je()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionE));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jz()
+    {
+        return je();
+    }
+
+    AssemblerLabel jl()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionL));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jb()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionB));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jle()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionLE));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jbe()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionBE));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jge()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionGE));
+        return m_formatter.immediateRel32();
+    }
+
+    AssemblerLabel jg()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionG));
+        return m_formatter.immediateRel32();
+    }
+
+    AssemblerLabel ja()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionA));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jae()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionAE));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel jo()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionO));
+        return m_formatter.immediateRel32();
+    }
+
+    AssemblerLabel jnp()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionNP));
+        return m_formatter.immediateRel32();
+    }
+
+    AssemblerLabel jp()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionP));
+        return m_formatter.immediateRel32();
+    }
+    
+    AssemblerLabel js()
+    {
+        m_formatter.twoByteOp(jccRel32(ConditionS));
+        return m_formatter.immediateRel32();
+    }
+
+    AssemblerLabel jCC(Condition cond)
+    {
+        m_formatter.twoByteOp(jccRel32(cond));
+        return m_formatter.immediateRel32();
+    }
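+
+    // Illustrative note (editorial, not part of the upstream source): each helper above emits
+    // a two-byte 0F 8x opcode followed by a 4-byte rel32 placeholder; the returned
+    // AssemblerLabel marks that slot so linkJump() / setRel32() can patch in the displacement
+    // once the target is known.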
+
+    // SSE operations:
+
+    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vaddsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void addsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vaddsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vaddsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void addss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vaddss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void addss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void addss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vaddss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vaddss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+#if !CPU(X86_64)
+    void addsd_mr(const void* address, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
+    }
+#endif
+
+    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+    }
+
+    void cvtsi2ss_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+    }
+
+#if CPU(X86_64)
+    void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+    }
+
+    void cvtsi2ssq_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+    }
+
+    void cvtsi2sdq_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
+
+    void cvtsi2ssq_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
+#endif
+
+    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
+
+    void cvtsi2ss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+    }
+
+#if !CPU(X86_64)
+    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+    }
+#endif
+
+    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+    }
+
+    void cvttss2si_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src);
+    }
+
+#if CPU(X86_64)
+    void cvttss2siq_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp64(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src);
+    }
+#endif
+
+    void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
+    }
+
+    void cvtsd2ss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, base, offset);
+    }
+
+    void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
+    }
+
+    void cvtss2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, base, offset);
+    }
+
+#if CPU(X86_64)
+    void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+    }
+#endif
+
+    void movd_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
+    }
+
+    void movd_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
+    }
+
+#if CPU(X86_64)
+    void movmskpd_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src);
+    }
+
+    void movq_rr(XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
+    }
+
+    void movq_rr(RegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
+    }
+#endif
+
+    void movapd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_MOVAPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void movaps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_MOVAPS_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+    }
+    
+    void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+    }
+
+    void movss_rm(XMMRegisterID src, int offset, RegisterID base)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+    }
+    
+    void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+    }
+    
+    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void movss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+#if !CPU(X86_64)
+    void movsd_mr(const void* address, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+    }
+    void movsd_rm(XMMRegisterID src, const void* address)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+    }
+#endif
+
+    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vmulsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void mulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vmulsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vmulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void mulss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vmulss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void mulss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void mulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vmulss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vmulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
+        m_formatter.immediate8(whichWord);
+    }
+
+    void psllq_i8r(int imm, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
+        m_formatter.immediate8(imm);
+    }
+
+    void psrlq_i8r(int imm, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
+        m_formatter.immediate8(imm);
+    }
+
+    void por_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vsubsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void subsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void subss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void vsubss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b);
+    }
+
+    void subss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void subss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset);
+    }
+
+    void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset);
+    }
+
+    void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+    {
+        m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale);
+    }
+
+    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void ucomiss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void ucomiss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void divss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void divss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void andps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_ANDPS_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void xorps_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        if (src == dst) {
+            xorps_rr(src, dst);
+            return;
+        }
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void sqrtsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F2);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    void sqrtss_rr(XMMRegisterID src, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+    }
+
+    void sqrtss_mr(int offset, RegisterID base, XMMRegisterID dst)
+    {
+        m_formatter.prefix(PRE_SSE_F3);
+        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset);
+    }
+
+    enum class RoundingType : uint8_t {
+        ToNearestWithTiesToEven = 0,
+        TowardNegativeInfiniti = 1,
+        TowardInfiniti = 2,
+        TowardZero = 3
+    };
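+
+    // Illustrative note (editorial, not part of the upstream source): these values are the
+    // rounding-control bits of the ROUNDSS/ROUNDSD imm8 operand, so for example
+    // roundsd_rr(src, dst, RoundingType::TowardZero) truncates toward zero (imm8 == 3).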
+
+    void roundss_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, (RegisterID)src);
+        m_formatter.immediate8(static_cast<int>(rounding));
+    }
+
+    void roundss_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, base, offset);
+        m_formatter.immediate8(static_cast<int>(rounding));
+    }
+
+    void roundsd_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, (RegisterID)src);
+        m_formatter.immediate8(static_cast<int>(rounding));
+    }
+
+    void roundsd_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding)
+    {
+        m_formatter.prefix(PRE_SSE_66);
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, base, offset);
+        m_formatter.immediate8(static_cast<int>(rounding));
+    }
+
+    // Misc instructions:
+
+    void int3()
+    {
+        m_formatter.oneByteOp(OP_INT3);
+    }
+    
+    void ret()
+    {
+        m_formatter.oneByteOp(OP_RET);
+    }
+
+    void predictNotTaken()
+    {
+        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
+    }
+    
+    void lock()
+    {
+        m_formatter.prefix(PRE_LOCK);
+    }
+    
+    void mfence()
+    {
+        m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_MFENCE);
+    }
+
+    // Assembler admin methods:
+
+    size_t codeSize() const
+    {
+        return m_formatter.codeSize();
+    }
+    
+    AssemblerLabel labelForWatchpoint()
+    {
+        AssemblerLabel result = m_formatter.label();
+        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+            result = label();
+        m_indexOfLastWatchpoint = result.m_offset;
+        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+        return result;
+    }
+    
+    AssemblerLabel labelIgnoringWatchpoints()
+    {
+        return m_formatter.label();
+    }
+
+    AssemblerLabel label()
+    {
+        AssemblerLabel result = m_formatter.label();
+        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+            nop();
+            result = m_formatter.label();
+        }
+        return result;
+    }
+
+    AssemblerLabel align(int alignment)
+    {
+        while (!m_formatter.isAligned(alignment))
+            m_formatter.oneByteOp(OP_HLT);
+
+        return label();
+    }
+
+    // Linking & patching:
+    //
+    // 'link' and 'patch' methods are for use on unprotected code - such as the code
+    // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
+    // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+    // pool the 'repatch' and 'relink' methods should be used.
+
+    void linkJump(AssemblerLabel from, AssemblerLabel to)
+    {
+        ASSERT(from.isSet());
+        ASSERT(to.isSet());
+
+        char* code = reinterpret_cast<char*>(m_formatter.data());
+        ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
+        setRel32(code + from.m_offset, code + to.m_offset);
+    }
+    
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+
+        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+    }
+
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+
+        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+    }
+
+    static void linkPointer(void* code, AssemblerLabel where, void* value)
+    {
+        ASSERT(where.isSet());
+
+        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+    }
+
+    static void relinkJump(void* from, void* to)
+    {
+        setRel32(from, to);
+    }
+    
+    static void relinkJumpToNop(void* from)
+    {
+        setInt32(from, 0);
+    }
+    
+    static void relinkCall(void* from, void* to)
+    {
+        setRel32(from, to);
+    }
+    
+    static void repatchCompact(void* where, int32_t value)
+    {
+        ASSERT(value >= std::numeric_limits<int8_t>::min());
+        ASSERT(value <= std::numeric_limits<int8_t>::max());
+        setInt8(where, value);
+    }
+
+    static void repatchInt32(void* where, int32_t value)
+    {
+        setInt32(where, value);
+    }
+
+    static void repatchPointer(void* where, void* value)
+    {
+        setPointer(where, value);
+    }
+    
+    static void* readPointer(void* where)
+    {
+        return reinterpret_cast<void**>(where)[-1];
+    }
+
+    static void replaceWithJump(void* instructionStart, void* to)
+    {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+        uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
+        intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
+        ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
+        *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
+    }
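+
+    // Illustrative note (editorial, not part of the upstream source): the replacement above is
+    // E9 followed by a rel32 measured from the end of the 5-byte instruction (ptr + 5), which
+    // is why maxJumpReplacementSize() and patchableJumpSize() below both report 5 bytes.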
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return 5;
+    }
+
+    static constexpr ptrdiff_t patchableJumpSize()
+    {
+        return 5;
+    }
+    
+#if CPU(X86_64)
+    static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
+    {
+        const unsigned instructionSize = 10; // REX.W MOV IMM64
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+        ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
+        ptr[1] = OP_MOV_EAXIv | (dst & 7);
+        
+        union {
+            uint64_t asWord;
+            uint8_t asBytes[8];
+        } u;
+        u.asWord = imm;
+        for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+    }
+
+    static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
+    {
+        // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
+        // FIXME: If the above is ever false then we need to make this smarter with respect to emitting 
+        // the REX byte.
+        ASSERT(dst == X86Registers::r11);
+        const unsigned instructionSize = 6; // REX MOV IMM32
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+        ptr[0] = PRE_REX | (dst >> 3);
+        ptr[1] = OP_MOV_EAXIv | (dst & 7);
+        
+        union {
+            uint32_t asWord;
+            uint8_t asBytes[4];
+        } u;
+        u.asWord = imm;
+        for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+    }
+#endif
+
+    static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
+    {
+        const int opcodeBytes = 1;
+        const int modRMBytes = 1;
+        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+        ptr[0] = OP_GROUP1_EvIz;
+        ptr[1] = X86InstructionFormatter::ModRmRegister | (GROUP1_OP_CMP << 3) | dst;
+        union {
+            uint32_t asWord;
+            uint8_t asBytes[4];
+        } u;
+        u.asWord = imm;
+        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+    }
+    
+    static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
+    {
+        ASSERT_UNUSED(offset, !offset);
+        const int opcodeBytes = 1;
+        const int modRMBytes = 1;
+        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+        ptr[0] = OP_GROUP1_EvIz;
+        ptr[1] = X86InstructionFormatter::ModRmMemoryNoDisp | (GROUP1_OP_CMP << 3) | dst;
+        union {
+            uint32_t asWord;
+            uint8_t asBytes[4];
+        } u;
+        u.asWord = imm;
+        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+    }
+    
+    static void replaceWithLoad(void* instructionStart)
+    {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+        if ((*ptr & ~15) == PRE_REX)
+            ptr++;
+#endif
+        switch (*ptr) {
+        case OP_MOV_GvEv:
+            break;
+        case OP_LEA:
+            *ptr = OP_MOV_GvEv;
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+    
+    static void replaceWithAddressComputation(void* instructionStart)
+    {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+        if ((*ptr & ~15) == PRE_REX)
+            ptr++;
+#endif
+        switch (*ptr) {
+        case OP_MOV_GvEv:
+            *ptr = OP_LEA;
+            break;
+        case OP_LEA:
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+    
+    static unsigned getCallReturnOffset(AssemblerLabel call)
+    {
+        ASSERT(call.isSet());
+        return call.m_offset;
+    }
+
+    static void* getRelocatedAddress(void* code, AssemblerLabel label)
+    {
+        ASSERT(label.isSet());
+        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+    }
+    
+    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+    {
+        return b.m_offset - a.m_offset;
+    }
+    
+    unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+    void nop()
+    {
+        m_formatter.oneByteOp(OP_NOP);
+    }
+
+    static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+    {
+        UNUSED_PARAM(isCopyingToExecutableMemory);
+#if CPU(X86_64)
+        static const uint8_t nops[10][10] = {
+            // nop
+            {0x90},
+            // xchg %ax,%ax
+            {0x66, 0x90},
+            // nopl (%[re]ax)
+            {0x0f, 0x1f, 0x00},
+            // nopl 8(%[re]ax)
+            {0x0f, 0x1f, 0x40, 0x08},
+            // nopl 8(%[re]ax,%[re]ax,1)
+            {0x0f, 0x1f, 0x44, 0x00, 0x08},
+            // nopw 8(%[re]ax,%[re]ax,1)
+            {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
+            // nopl 512(%[re]ax)
+            {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
+            // nopl 512(%[re]ax,%[re]ax,1)
+            {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+            // nopw 512(%[re]ax,%[re]ax,1)
+            {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+            // nopw %cs:512(%[re]ax,%[re]ax,1)
+            {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
+        };
+
+        uint8_t* where = reinterpret_cast<uint8_t*>(base);
+        while (size) {
+            unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15));
+            unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10;
+            for (unsigned i = 0; i != numPrefixes; ++i)
+                *where++ = 0x66;
+
+            unsigned nopRest = nopSize - numPrefixes;
+            for (unsigned i = 0; i != nopRest; ++i)
+                *where++ = nops[nopRest-1][i];
+
+            size -= nopSize;
+        }
+#else
+        memset(base, OP_NOP, size);
+#endif
+    }
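
The loop above never emits a chunk longer than 15 bytes, the maximum legal x86 instruction length, and pads chunks of 11 to 15 bytes with 0x66 prefixes in front of the canonical 10-byte nop. A minimal standalone sketch of that decomposition, separate from the assembler and purely illustrative:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Mirrors the chunking in fillNops(): each chunk is at most 15 bytes and is
    // emitted as (chunk - 10) 0x66 prefixes plus a canonical nop when chunk > 10.
    static void describeNopFill(size_t size)
    {
        while (size) {
            size_t chunk = std::min<size_t>(size, 15);
            size_t prefixes = chunk <= 10 ? 0 : chunk - 10;
            std::printf("%zu bytes: %zu x 0x66 prefix + %zu-byte nop\n", chunk, prefixes, chunk - prefixes);
            size -= chunk;
        }
    }

For example, describeNopFill(23) reports one 15-byte chunk (five prefixes plus the 10-byte nop) followed by one plain 8-byte nop.
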
+
+    // This is a no-op on x86
+    ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
+
+private:
+
+    static void setPointer(void* where, void* value)
+    {
+        reinterpret_cast<void**>(where)[-1] = value;
+    }
+
+    static void setInt32(void* where, int32_t value)
+    {
+        reinterpret_cast<int32_t*>(where)[-1] = value;
+    }
+    
+    static void setInt8(void* where, int8_t value)
+    {
+        reinterpret_cast<int8_t*>(where)[-1] = value;
+    }
+
+    static void setRel32(void* from, void* to)
+    {
+        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+        ASSERT(offset == static_cast<int32_t>(offset));
+
+        setInt32(from, offset);
+    }
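
setRel32() stores the displacement into the four bytes immediately before 'from', so 'from' is the address just past the rel32 field; that is exactly the point x86 relative branches are measured from. As a hand-worked example with illustrative addresses: a CALL rel32 (opcode E8) placed at 0x1000 and targeting 0x2000 ends at 0x1005, so the stored displacement is 0x2000 - 0x1005 = 0x00000FFB and the emitted bytes are E8 FB 0F 00 00.
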
+
+    class X86InstructionFormatter {
+        static const int maxInstructionSize = 16;
+
+    public:
+        enum ModRmMode {
+            ModRmMemoryNoDisp = 0,
+            ModRmMemoryDisp8 = 1 << 6,
+            ModRmMemoryDisp32 = 2 << 6,
+            ModRmRegister = 3 << 6,
+        };
+
+        // Legacy prefix bytes:
+        //
+        // These are emitted prior to the instruction.
+
+        void prefix(OneByteOpcodeID pre)
+        {
+            m_buffer.putByte(pre);
+        }
+
+#if CPU(X86_64)
+        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
+        static bool byteRegRequiresRex(int reg)
+        {
+            static_assert(X86Registers::esp == 4, "Necessary condition for OR-masking");
+            return (reg >= X86Registers::esp);
+        }
+        static bool byteRegRequiresRex(int a, int b)
+        {
+            return byteRegRequiresRex(a | b);
+        }
+
+        // Registers r8 & above require a REX prefix.
+        static bool regRequiresRex(int reg)
+        {
+            static_assert(X86Registers::r8 == 8, "Necessary condition for OR-masking");
+            return (reg >= X86Registers::r8);
+        }
+        static bool regRequiresRex(int a, int b)
+        {
+            return regRequiresRex(a | b);
+        }
+        static bool regRequiresRex(int a, int b, int c)
+        {
+            return regRequiresRex(a | b | c);
+        }
+#else
+        static bool byteRegRequiresRex(int) { return false; }
+        static bool byteRegRequiresRex(int, int) { return false; }
+        static bool regRequiresRex(int) { return false; }
+        static bool regRequiresRex(int, int) { return false; }
+        static bool regRequiresRex(int, int, int) { return false; }
+#endif
+
+        class SingleInstructionBufferWriter : public AssemblerBuffer::LocalWriter {
+        public:
+            SingleInstructionBufferWriter(AssemblerBuffer& buffer)
+                : AssemblerBuffer::LocalWriter(buffer, maxInstructionSize)
+            {
+            }
+
+            // Internals; ModRm and REX formatters.
+
+            static constexpr RegisterID noBase = X86Registers::ebp;
+            static constexpr RegisterID hasSib = X86Registers::esp;
+            static constexpr RegisterID noIndex = X86Registers::esp;
+
+#if CPU(X86_64)
+            static constexpr RegisterID noBase2 = X86Registers::r13;
+            static constexpr RegisterID hasSib2 = X86Registers::r12;
+
+            // Format a REX prefix byte.
+            ALWAYS_INLINE void emitRex(bool w, int r, int x, int b)
+            {
+                ASSERT(r >= 0);
+                ASSERT(x >= 0);
+                ASSERT(b >= 0);
+                putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+            }
+
+            // Used to plant a REX byte with REX.w set (for 64-bit operations).
+            ALWAYS_INLINE void emitRexW(int r, int x, int b)
+            {
+                emitRex(true, r, x, b);
+            }
+
+            // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+            // regRequiresRex() to check other registers (i.e. address base & index).
+            ALWAYS_INLINE void emitRexIf(bool condition, int r, int x, int b)
+            {
+                if (condition)
+                    emitRex(false, r, x, b);
+            }
+
+            // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
+            ALWAYS_INLINE void emitRexIfNeeded(int r, int x, int b)
+            {
+                emitRexIf(regRequiresRex(r, x, b), r, x, b);
+            }
+#else
+            // No REX prefix bytes on 32-bit x86.
+            ALWAYS_INLINE void emitRexIf(bool, int, int, int) { }
+            ALWAYS_INLINE void emitRexIfNeeded(int, int, int) { }
+#endif
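
The REX prefix assembled by emitRex() is 0100WRXB: W selects a 64-bit operand size, while R, X and B contribute the fourth bit of the ModRM reg field, the SIB index and the ModRM rm/base field. A tiny self-contained sketch of that composition (not part of the assembler; names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Same layout as emitRex() above: 0x40 | W<<3 | R<<2 | X<<1 | B, where each of
    // R/X/B is the high bit of the corresponding register number.
    static uint8_t rexByte(bool w, int reg, int index, int base)
    {
        return 0x40 | (int(w) << 3) | ((reg >> 3) << 2) | ((index >> 3) << 1) | (base >> 3);
    }

    int main()
    {
        // movq %rax, %r8 assembles to 49 89 c0: W=1 for the 64-bit move and
        // B=1 because r8 (register number 8) sits in the ModRM rm field.
        assert(rexByte(true, 0 /* rax in reg */, 0 /* no index */, 8 /* r8 in rm */) == 0x49);
        return 0;
    }
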
+
+            ALWAYS_INLINE void putModRm(ModRmMode mode, int reg, RegisterID rm)
+            {
+                putByteUnchecked(mode | ((reg & 7) << 3) | (rm & 7));
+            }
+
+            ALWAYS_INLINE void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+            {
+                ASSERT(mode != ModRmRegister);
+
+                putModRm(mode, reg, hasSib);
+                putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+            }
+
+            ALWAYS_INLINE void registerModRM(int reg, RegisterID rm)
+            {
+                putModRm(ModRmRegister, reg, rm);
+            }
+
+            ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, int offset)
+            {
+                // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+                if ((base == hasSib) || (base == hasSib2)) {
+#else
+                if (base == hasSib) {
+#endif
+                    if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+                        putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+                    else if (CAN_SIGN_EXTEND_8_32(offset)) {
+                        putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+                        putByteUnchecked(offset);
+                    } else {
+                        putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+                        putIntUnchecked(offset);
+                    }
+                } else {
+#if CPU(X86_64)
+                    if (!offset && (base != noBase) && (base != noBase2))
+#else
+                    if (!offset && (base != noBase))
+#endif
+                        putModRm(ModRmMemoryNoDisp, reg, base);
+                    else if (CAN_SIGN_EXTEND_8_32(offset)) {
+                        putModRm(ModRmMemoryDisp8, reg, base);
+                        putByteUnchecked(offset);
+                    } else {
+                        putModRm(ModRmMemoryDisp32, reg, base);
+                        putIntUnchecked(offset);
+                    }
+                }
+            }
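
The special case above exists because rm/base value 4 (esp/rsp, or r12 once REX.B is applied) does not name a base register in ModRM; it means "a SIB byte follows", so a SIB with index = 100b (none) is substituted. Hand-assembled illustration: movq 8(%rsp), %rax is 48 8B 44 24 08: REX.W, opcode 8B, ModRM 44 (mod=01 for disp8, reg=rax, rm=100 selecting a SIB), SIB 24 (scale=0, index=none, base=rsp), then the disp8 byte 08.
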
+
+            ALWAYS_INLINE void memoryModRM_disp8(int reg, RegisterID base, int offset)
+            {
+                // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+                ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+                if ((base == hasSib) || (base == hasSib2)) {
+#else
+                if (base == hasSib) {
+#endif
+                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+                    putByteUnchecked(offset);
+                } else {
+                    putModRm(ModRmMemoryDisp8, reg, base);
+                    putByteUnchecked(offset);
+                }
+            }
+
+            ALWAYS_INLINE void memoryModRM_disp32(int reg, RegisterID base, int offset)
+            {
+                // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+                if ((base == hasSib) || (base == hasSib2)) {
+#else
+                if (base == hasSib) {
+#endif
+                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+                    putIntUnchecked(offset);
+                } else {
+                    putModRm(ModRmMemoryDisp32, reg, base);
+                    putIntUnchecked(offset);
+                }
+            }
+        
+            ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+            {
+                ASSERT(index != noIndex);
+
+#if CPU(X86_64)
+                if (!offset && (base != noBase) && (base != noBase2))
+#else
+                if (!offset && (base != noBase))
+#endif
+                    putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+                else if (CAN_SIGN_EXTEND_8_32(offset)) {
+                    putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+                    putByteUnchecked(offset);
+                } else {
+                    putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+                    putIntUnchecked(offset);
+                }
+            }
+
+#if !CPU(X86_64)
+            ALWAYS_INLINE void memoryModRM(int reg, const void* address)
+            {
+                // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
+                putModRm(ModRmMemoryNoDisp, reg, noBase);
+                putIntUnchecked(reinterpret_cast<int32_t>(address));
+            }
+#endif
+            ALWAYS_INLINE void twoBytesVex(OneByteOpcodeID simdPrefix, RegisterID inOpReg, RegisterID r)
+            {
+                putByteUnchecked(VexPrefix::TwoBytes);
+
+                uint8_t secondByte = vexEncodeSimdPrefix(simdPrefix);
+                secondByte |= (~inOpReg & 0xf) << 3;
+                secondByte |= !regRequiresRex(r) << 7;
+                putByteUnchecked(secondByte);
+            }
+
+            ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID x, RegisterID b)
+            {
+                putByteUnchecked(VexPrefix::ThreeBytes);
+
+                uint8_t secondByte = static_cast<uint8_t>(impliedBytes);
+                secondByte |= !regRequiresRex(r) << 7;
+                secondByte |= !regRequiresRex(x) << 6;
+                secondByte |= !regRequiresRex(b) << 5;
+                putByteUnchecked(secondByte);
+
+                uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix);
+                thirdByte |= (~inOpReg & 0xf) << 3;
+                putByteUnchecked(thirdByte);
+            }
+
+            ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID b)
+            {
+                putByteUnchecked(VexPrefix::ThreeBytes);
+
+                uint8_t secondByte = static_cast<uint8_t>(impliedBytes);
+                secondByte |= !regRequiresRex(r) << 7;
+                secondByte |= 1 << 6; // REX.X
+                secondByte |= !regRequiresRex(b) << 5;
+                putByteUnchecked(secondByte);
+
+                uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix);
+                thirdByte |= (~inOpReg & 0xf) << 3;
+                putByteUnchecked(thirdByte);
+            }
+        private:
+            uint8_t vexEncodeSimdPrefix(OneByteOpcodeID simdPrefix)
+            {
+                switch (simdPrefix) {
+                case 0x66:
+                    return 1;
+                case 0xF3:
+                    return 2;
+                case 0xF2:
+                    return 3;
+                default:
+                    RELEASE_ASSERT_NOT_REACHED();
+                }
+                return 0;
+            }
+
+        };
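
The VEX helpers above emit the compact two-byte prefix when only the inverted REX.R bit is needed and fall back to the three-byte form otherwise. A hand-worked two-byte case, not taken from the file: vaddsd %xmm2, %xmm1, %xmm0 encodes as C5 F3 58 C2, where the second byte follows the same composition as twoBytesVex():

    // inverted REX.R (xmm0 < 8), vvvv = ~xmm1, L left at 0, pp = 3 selecting the F2 implied prefix
    uint8_t secondByte = (1 << 7) | ((~1 & 0xf) << 3) | 3; // == 0xF3

followed by the ADDSD opcode 58 and ModRM C2 (mod=11, reg=xmm0, rm=xmm2).
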
+
+        // Word-sized operands / no operand instruction formatters.
+        //
+        // In addition to the opcode, the following operand permutations are supported:
+        //   * None - instruction takes no operands.
+        //   * One register - the low three bits of the RegisterID are added into the opcode.
+        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
+        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+        //
+        // For 32-bit x86 targets, the address operand may also be provided as a void*.
+        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
+        //
+        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
+
+        void oneByteOp(OneByteOpcodeID opcode)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(opcode);
+        }
+
+        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(0, 0, reg);
+            writer.putByteUnchecked(opcode + (reg & 7));
+        }
+
+        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
+        }
+
+        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp32(reg, base, offset);
+        }
+        
+        void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp8(reg, base, offset);
+        }
+
+        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, index, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
+        }
+
+#if !CPU(X86_64)
+        void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, address);
+        }
+#endif
+
+        void twoByteOp(TwoByteOpcodeID opcode)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+        }
+
+        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
+        }
+
+        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, index, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
+        }
+
+#if !CPU(X86_64)
+        void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, address);
+        }
+#endif
+        void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            if (regRequiresRex(b))
+                writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, b);
+            else
+                writer.twoBytesVex(simdPrefix, a, dest);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(dest, b);
+        }
+
+        void vexNdsLigWigCommutativeTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b)
+        {
+            // Since this is a commutative operation, we can try switching the arguments.
+            if (regRequiresRex(b))
+                std::swap(a, b);
+            vexNdsLigWigTwoByteOp(simdPrefix, opcode, dest, a, b);
+        }
+
+        void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            if (regRequiresRex(base))
+                writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, base);
+            else
+                writer.twoBytesVex(simdPrefix, a, dest);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(dest, base, offset);
+        }
+
+        void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, int offset, RegisterID base, RegisterID index, int scale)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            if (regRequiresRex(base, index))
+                writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, index, base);
+            else
+                writer.twoBytesVex(simdPrefix, a, dest);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(dest, base, index, scale, offset);
+        }
+
+        void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(twoBytePrefix);
+            writer.putByteUnchecked(opcode);
+        }
+
+        void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(twoBytePrefix);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID base, int displacement)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIfNeeded(reg, 0, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(twoBytePrefix);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, displacement);
+        }
+
+#if CPU(X86_64)
+        // Quad-word-sized operands:
+        //
+        // Used to format 64-bit operations, planting a REX.w prefix.
+        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
+        // the normal (non-'64'-postfixed) formatters should be used.
+
+        void oneByteOp64(OneByteOpcodeID opcode)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(0, 0, 0);
+            writer.putByteUnchecked(opcode);
+        }
+
+        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(0, 0, reg);
+            writer.putByteUnchecked(opcode + (reg & 7));
+        }
+
+        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
+        }
+
+        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp32(reg, base, offset);
+        }
+        
+        void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM_disp8(reg, base, offset);
+        }
+
+        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, index, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
+        }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, 0, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
+        }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexW(reg, index, base);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
+        }
+#endif
+
+        // Byte-operands:
+        //
+        // These methods format byte operations.  Byte operations differ from the normal
+        // formatters in the circumstances under which they will decide to emit REX prefixes.
+        // These should be used where any register operand signifies a byte register.
+        //
+        // The distinction is due to the handling of register numbers in the range 4..7 on
+        // x86-64.  These register numbers may either represent the second byte of the first
+        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
+        //
+        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
+        // be accessed where a REX prefix is present), these are likely best treated as
+        // deprecated.  In order to ensure the correct registers spl..dil are selected a
+        // REX prefix will be emitted for any byte register operand in the range 4..15.
+        //
+        // These formatters may be used in instructions that mix operand sizes, in which
+        // case an unnecessary REX prefix will be emitted, for example:
+        //     movzbl %al, %edi
+        // In this case a REX will be planted since edi is 7 (and were this a byte operand
+        // a REX would be required to specify dil instead of bh).  Unneeded REX prefixes will
+        // be silently ignored by the processor.
+        //
+        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
+        // is provided to check byte register operands.
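
Concretely (hand-assembled, for illustration): setne %bl needs no prefix and is 0F 95 C3, whereas setne %dil must carry an otherwise-empty REX byte, 40 0F 95 C7; without the 40, rm value 7 would select %bh rather than %dil. That is the case byteRegRequiresRex() is guarding against.
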
+
+        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(groupOp, rm);
+        }
+
+        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg, base), reg, 0, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, offset);
+        }
+
+        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index, base), reg, index, base);
+            writer.putByteUnchecked(opcode);
+            writer.memoryModRM(reg, base, index, scale, offset);
+        }
+
+        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(reg, rm);
+        }
+
+        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+        {
+            SingleInstructionBufferWriter writer(m_buffer);
+            writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+            writer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            writer.putByteUnchecked(opcode);
+            writer.registerModRM(groupOp, rm);
+        }
+
+        // Immediates:
+        //
+        // An immediate should be appended where appropriate after an op has been emitted.
+        // The writes are unchecked since the opcode formatters above will have ensured space.
+
+        void immediate8(int imm)
+        {
+            m_buffer.putByteUnchecked(imm);
+        }
+
+        void immediate16(int imm)
+        {
+            m_buffer.putShortUnchecked(imm);
+        }
+
+        void immediate32(int imm)
+        {
+            m_buffer.putIntUnchecked(imm);
+        }
+
+        void immediate64(int64_t imm)
+        {
+            m_buffer.putInt64Unchecked(imm);
+        }
+
+        AssemblerLabel immediateRel32()
+        {
+            m_buffer.putIntUnchecked(0);
+            return label();
+        }
+
+        // Administrative methods:
+
+        size_t codeSize() const { return m_buffer.codeSize(); }
+        AssemblerLabel label() const { return m_buffer.label(); }
+        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+        void* data() const { return m_buffer.data(); }
+
+        unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+    public:
+        AssemblerBuffer m_buffer;
+    } m_formatter;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
diff --git a/b3/B3ArgumentRegValue.cpp b/b3/B3ArgumentRegValue.cpp
new file mode 100644
index 0000000..594d0d6
--- /dev/null
+++ b/b3/B3ArgumentRegValue.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ArgumentRegValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+ArgumentRegValue::~ArgumentRegValue()
+{
+}
+
+void ArgumentRegValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_reg);
+}
+
+Value* ArgumentRegValue::cloneImpl() const
+{
+    return new ArgumentRegValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ArgumentRegValue.h b/b3/B3ArgumentRegValue.h
new file mode 100644
index 0000000..55b365f
--- /dev/null
+++ b/b3/B3ArgumentRegValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include "Reg.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ArgumentRegValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == ArgumentReg; }
+    
+    ~ArgumentRegValue();
+
+    Reg argumentReg() const { return m_reg; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    ArgumentRegValue(Origin origin, Reg reg)
+        : Value(CheckedOpcode, ArgumentReg, reg.isGPR() ? pointerType() : Double, origin)
+        , m_reg(reg)
+    {
+        ASSERT(reg.isSet());
+    }
+
+    Reg m_reg;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BasicBlock.cpp b/b3/B3BasicBlock.cpp
new file mode 100644
index 0000000..63a4e58
--- /dev/null
+++ b/b3/B3BasicBlock.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3BasicBlock.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BasicBlockUtils.h"
+#include "B3Procedure.h"
+#include "B3ValueInlines.h"
+#include 
+
+namespace JSC { namespace B3 {
+
+const char* const BasicBlock::dumpPrefix = "#";
+
+BasicBlock::BasicBlock(unsigned index, double frequency)
+    : m_index(index)
+    , m_frequency(frequency)
+{
+}
+
+BasicBlock::~BasicBlock()
+{
+}
+
+void BasicBlock::append(Value* value)
+{
+    m_values.append(value);
+    value->owner = this;
+}
+
+void BasicBlock::appendNonTerminal(Value* value)
+{
+    m_values.append(m_values.last());
+    m_values[m_values.size() - 2] = value;
+    value->owner = this;
+}
+
+void BasicBlock::removeLast(Procedure& proc)
+{
+    ASSERT(!m_values.isEmpty());
+    proc.deleteValue(m_values.takeLast());
+}
+
+void BasicBlock::replaceLast(Procedure& proc, Value* value)
+{
+    removeLast(proc);
+    append(value);
+}
+
+Value* BasicBlock::appendIntConstant(Procedure& proc, Origin origin, Type type, int64_t value)
+{
+    Value* result = proc.addIntConstant(origin, type, value);
+    append(result);
+    return result;
+}
+
+Value* BasicBlock::appendIntConstant(Procedure& proc, Value* likeValue, int64_t value)
+{
+    return appendIntConstant(proc, likeValue->origin(), likeValue->type(), value);
+}
+
+Value* BasicBlock::appendBoolConstant(Procedure& proc, Origin origin, bool value)
+{
+    return appendIntConstant(proc, origin, Int32, value ? 1 : 0);
+}
+
+void BasicBlock::clearSuccessors()
+{
+    m_successors.clear();
+}
+
+void BasicBlock::appendSuccessor(FrequentedBlock target)
+{
+    m_successors.append(target);
+}
+
+void BasicBlock::setSuccessors(FrequentedBlock target)
+{
+    m_successors.resize(1);
+    m_successors[0] = target;
+}
+
+void BasicBlock::setSuccessors(FrequentedBlock taken, FrequentedBlock notTaken)
+{
+    m_successors.resize(2);
+    m_successors[0] = taken;
+    m_successors[1] = notTaken;
+}
+
+bool BasicBlock::replaceSuccessor(BasicBlock* from, BasicBlock* to)
+{
+    bool result = false;
+    for (BasicBlock*& successor : successorBlocks()) {
+        if (successor == from) {
+            successor = to;
+            result = true;
+            
+            // Keep looping because a successor may be mentioned multiple times, like in a Switch.
+        }
+    }
+    return result;
+}
+
+bool BasicBlock::addPredecessor(BasicBlock* block)
+{
+    return B3::addPredecessor(this, block);
+}
+
+bool BasicBlock::removePredecessor(BasicBlock* block)
+{
+    return B3::removePredecessor(this, block);
+}
+
+bool BasicBlock::replacePredecessor(BasicBlock* from, BasicBlock* to)
+{
+    return B3::replacePredecessor(this, from, to);
+}
+
+void BasicBlock::updatePredecessorsAfter()
+{
+    B3::updatePredecessorsAfter(this);
+}
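
Successor and predecessor lists are maintained independently, so a transformation that redirects an edge has to patch both sides itself. A minimal illustration using only the methods declared in this patch (block names are hypothetical):

    // Redirect the edge pred -> oldTarget so that it points at newTarget instead.
    pred->replaceSuccessor(oldTarget, newTarget);
    oldTarget->removePredecessor(pred);
    newTarget->addPredecessor(pred);
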
+
+void BasicBlock::dump(PrintStream& out) const
+{
+    out.print(dumpPrefix, m_index);
+}
+
+void BasicBlock::deepDump(const Procedure& proc, PrintStream& out) const
+{
+    out.print("BB", *this, ": ; frequency = ", m_frequency, "\n");
+    if (predecessors().size())
+        out.print("  Predecessors: ", pointerListDump(predecessors()), "\n");
+    for (Value* value : *this)
+        out.print("    ", B3::deepDump(proc, value), "\n");
+    if (!successors().isEmpty()) {
+        out.print("  Successors: ");
+        if (size())
+            last()->dumpSuccessors(this, out);
+        else
+            out.print(listDump(successors()));
+        out.print("\n");
+    }
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin)
+{
+    RELEASE_ASSERT(opcode == Oops || opcode == Return);
+    clearSuccessors();
+    return appendNew<Value>(proc, opcode, origin);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, Value* value)
+{
+    RELEASE_ASSERT(opcode == Return);
+    clearSuccessors();
+    return appendNew<Value>(proc, opcode, origin, value);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, const FrequentedBlock& target)
+{
+    RELEASE_ASSERT(opcode == Jump);
+    setSuccessors(target);
+    return appendNew<Value>(proc, opcode, origin);
+}
+
+Value* BasicBlock::appendNewControlValue(Procedure& proc, Opcode opcode, Origin origin, Value* predicate, const FrequentedBlock& taken, const FrequentedBlock& notTaken)
+{
+    RELEASE_ASSERT(opcode == Branch);
+    setSuccessors(taken, notTaken);
+    return appendNew<Value>(proc, opcode, origin, predicate);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
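
For orientation, a small usage sketch of the append helpers defined above, in the style of the B3 tests; it assumes the usual B3 headers plus Procedure::addBlock(), Const32Value and Origin(), none of which appear in this file, so treat it as illustrative only:

    // Build a one-block procedure that returns the constant 42.
    Procedure proc;
    BasicBlock* root = proc.addBlock();
    Value* fortyTwo = root->appendNew<Const32Value>(proc, Origin(), 42);
    root->appendNewControlValue(proc, Return, Origin(), fortyTwo);
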
diff --git a/b3/B3BasicBlock.h b/b3/B3BasicBlock.h
new file mode 100644
index 0000000..11f4668
--- /dev/null
+++ b/b3/B3BasicBlock.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequentedBlock.h"
+#include "B3Opcode.h"
+#include "B3Origin.h"
+#include "B3SuccessorCollection.h"
+#include "B3Type.h"
+#include 
+
+namespace JSC { namespace B3 {
+
+class BlockInsertionSet;
+class InsertionSet;
+class Procedure;
+class Value;
+
+class BasicBlock {
+    WTF_MAKE_NONCOPYABLE(BasicBlock);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    typedef Vector<Value*> ValueList;
+    typedef Vector<BasicBlock*> PredecessorList;
+    typedef Vector<FrequentedBlock> SuccessorList;
+
+    static const char* const dumpPrefix;
+
+    ~BasicBlock();
+
+    unsigned index() const { return m_index; }
+
+    ValueList::iterator begin() { return m_values.begin(); }
+    ValueList::iterator end() { return m_values.end(); }
+    ValueList::const_iterator begin() const { return m_values.begin(); }
+    ValueList::const_iterator end() const { return m_values.end(); }
+
+    size_t size() const { return m_values.size(); }
+    Value* at(size_t index) const { return m_values[index]; }
+    Value*& at(size_t index) { return m_values[index]; }
+
+    Value* last() const { return m_values.last(); }
+    Value*& last() { return m_values.last(); }
+
+    const ValueList& values() const { return m_values; }
+    ValueList& values() { return m_values; }
+
+    JS_EXPORT_PRIVATE void append(Value*);
+    JS_EXPORT_PRIVATE void appendNonTerminal(Value*);
+    JS_EXPORT_PRIVATE void replaceLast(Procedure&, Value*);
+
+    template<typename ValueType, typename... Arguments>
+    ValueType* appendNew(Procedure&, Arguments...);
+    template<typename ValueType, typename... Arguments>
+    ValueType* appendNewNonTerminal(Procedure&, Arguments...);
+
+    JS_EXPORT_PRIVATE Value* appendIntConstant(Procedure&, Origin, Type, int64_t value);
+    Value* appendIntConstant(Procedure&, Value* likeValue, int64_t value);
+    Value* appendBoolConstant(Procedure&, Origin, bool);
+
+    void removeLast(Procedure&);
+    
+    template<typename ValueType, typename... Arguments>
+    ValueType* replaceLastWithNew(Procedure&, Arguments...);
+
+    unsigned numSuccessors() const { return m_successors.size(); }
+    const FrequentedBlock& successor(unsigned index) const { return m_successors[index]; }
+    FrequentedBlock& successor(unsigned index) { return m_successors[index]; }
+    const SuccessorList& successors() const { return m_successors; }
+    SuccessorList& successors() { return m_successors; }
+    
+    void clearSuccessors();
+    JS_EXPORT_PRIVATE void appendSuccessor(FrequentedBlock);
+    JS_EXPORT_PRIVATE void setSuccessors(FrequentedBlock);
+    JS_EXPORT_PRIVATE void setSuccessors(FrequentedBlock, FrequentedBlock);
+
+    BasicBlock* successorBlock(unsigned index) const { return successor(index).block(); }
+    BasicBlock*& successorBlock(unsigned index) { return successor(index).block(); }
+    SuccessorCollection<BasicBlock, SuccessorList> successorBlocks()
+    {
+        return SuccessorCollection<BasicBlock, SuccessorList>(successors());
+    }
+    SuccessorCollection<const BasicBlock, const SuccessorList> successorBlocks() const
+    {
+        return SuccessorCollection<const BasicBlock, const SuccessorList>(successors());
+    }
+
+    bool replaceSuccessor(BasicBlock* from, BasicBlock* to);
+    
+    // This is only valid for Jump and Branch.
+    const FrequentedBlock& taken() const;
+    FrequentedBlock& taken();
+    // This is only valid for Branch.
+    const FrequentedBlock& notTaken() const;
+    FrequentedBlock& notTaken();
+    // This is only valid for Branch and Switch.
+    const FrequentedBlock& fallThrough() const;
+    FrequentedBlock& fallThrough();
+
+    unsigned numPredecessors() const { return m_predecessors.size(); }
+    BasicBlock* predecessor(unsigned index) const { return m_predecessors[index]; }
+    BasicBlock*& predecessor(unsigned index) { return m_predecessors[index]; }
+    const PredecessorList& predecessors() const { return m_predecessors; }
+    PredecessorList& predecessors() { return m_predecessors; }
+    bool containsPredecessor(BasicBlock* block) { return m_predecessors.contains(block); }
+
+    bool addPredecessor(BasicBlock*);
+    bool removePredecessor(BasicBlock*);
+    bool replacePredecessor(BasicBlock* from, BasicBlock* to);
+
+    // Update predecessors starting with the successors of this block.
+    void updatePredecessorsAfter();
+
+    double frequency() const { return m_frequency; }
+
+    void dump(PrintStream&) const;
+    void deepDump(const Procedure&, PrintStream&) const;
+
+    // These are deprecated methods for compatibility with the old ControlValue class. Don't use them
+    // in new code.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159440
+    
+    // Use this for Oops.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin);
+    // Use this for Return.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, Value*);
+    // Use this for Jump.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, const FrequentedBlock&);
+    // Use this for Branch.
+    JS_EXPORT_PRIVATE Value* appendNewControlValue(Procedure&, Opcode, Origin, Value*, const FrequentedBlock&, const FrequentedBlock&);
+    
+private:
+    friend class BlockInsertionSet;
+    friend class InsertionSet;
+    friend class Procedure;
+    
+    // Instantiate via Procedure.
+    BasicBlock(unsigned index, double frequency);
+
+    unsigned m_index;
+    ValueList m_values;
+    PredecessorList m_predecessors;
+    SuccessorList m_successors;
+    double m_frequency;
+};
+
+class DeepBasicBlockDump {
+public:
+    DeepBasicBlockDump(const Procedure& proc, const BasicBlock* block)
+        : m_proc(proc)
+        , m_block(block)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_block)
+            m_block->deepDump(m_proc, out);
+        else
+            out.print("<null>");
+    }
+
+private:
+    const Procedure& m_proc;
+    const BasicBlock* m_block;
+};
+
+inline DeepBasicBlockDump deepDump(const Procedure& proc, const BasicBlock* block)
+{
+    return DeepBasicBlockDump(proc, block);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BasicBlockInlines.h b/b3/B3BasicBlockInlines.h
new file mode 100644
index 0000000..26c2df4
--- /dev/null
+++ b/b3/B3BasicBlockInlines.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3ProcedureInlines.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::appendNew(Procedure& procedure, Arguments... arguments)
+{
+    ValueType* result = procedure.add<ValueType>(arguments...);
+    append(result);
+    return result;
+}
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::appendNewNonTerminal(Procedure& procedure, Arguments... arguments)
+{
+    ValueType* result = procedure.add<ValueType>(arguments...);
+    appendNonTerminal(result);
+    return result;
+}
+
+template<typename ValueType, typename... Arguments>
+ValueType* BasicBlock::replaceLastWithNew(Procedure& procedure, Arguments... arguments)
+{
+    ValueType* result = procedure.add<ValueType>(arguments...);
+    replaceLast(procedure, result);
+    return result;
+}
+
+inline const FrequentedBlock& BasicBlock::taken() const
+{
+    ASSERT(last()->opcode() == Jump || last()->opcode() == Branch);
+    return m_successors[0];
+}
+
+inline FrequentedBlock& BasicBlock::taken()
+{
+    ASSERT(last()->opcode() == Jump || last()->opcode() == Branch);
+    return m_successors[0];
+}
+
+inline const FrequentedBlock& BasicBlock::notTaken() const
+{
+    ASSERT(last()->opcode() == Branch);
+    return m_successors[1];
+}
+
+inline FrequentedBlock& BasicBlock::notTaken()
+{
+    ASSERT(last()->opcode() == Branch);
+    return m_successors[1];
+}
+
+inline const FrequentedBlock& BasicBlock::fallThrough() const
+{
+    ASSERT(last()->opcode() == Branch || last()->opcode() == Switch);
+    return m_successors.last();
+}
+
+inline FrequentedBlock& BasicBlock::fallThrough()
+{
+    ASSERT(last()->opcode() == Branch || last()->opcode() == Switch);
+    return m_successors.last();
+}
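
taken() and notTaken() are only meaningful once the terminal value is in place, since they assert on its opcode. Illustrative use, assuming a Branch terminal was just appended (names are hypothetical):

    // block->appendNewControlValue(proc, Branch, Origin(), predicate, thenCase, elseCase);
    FrequentedBlock& ifTrue = block->taken();     // successor 0
    FrequentedBlock& ifFalse = block->notTaken(); // successor 1
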
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BasicBlockUtils.h b/b3/B3BasicBlockUtils.h
new file mode 100644
index 0000000..e5998c8
--- /dev/null
+++ b/b3/B3BasicBlockUtils.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+template<typename BasicBlock>
+bool addPredecessor(BasicBlock* block, BasicBlock* predecessor)
+{
+    auto& predecessors = block->predecessors();
+
+    if (predecessors.contains(predecessor))
+        return false;
+
+    predecessors.append(predecessor);
+    return true;
+}
+
+template<typename BasicBlock>
+bool removePredecessor(BasicBlock* block, BasicBlock* predecessor)
+{
+    auto& predecessors = block->predecessors();
+    for (unsigned i = 0; i < predecessors.size(); ++i) {
+        if (predecessors[i] == predecessor) {
+            predecessors[i--] = predecessors.last();
+            predecessors.removeLast();
+            ASSERT(!predecessors.contains(predecessor));
+            return true;
+        }
+    }
+    return false;
+}
+
+template<typename BasicBlock>
+bool replacePredecessor(BasicBlock* block, BasicBlock* from, BasicBlock* to)
+{
+    bool changed = false;
+    // We do it this way because 'to' may already be a predecessor of 'block'.
+    changed |= removePredecessor(block, from);
+    changed |= addPredecessor(block, to);
+    return changed;
+}
+
+template<typename BasicBlock>
+void updatePredecessorsAfter(BasicBlock* root)
+{
+    Vector<BasicBlock*> worklist;
+    worklist.append(root);
+    while (!worklist.isEmpty()) {
+        BasicBlock* block = worklist.takeLast();
+        for (BasicBlock* successor : block->successorBlocks()) {
+            if (addPredecessor(successor, block))
+                worklist.append(successor);
+        }
+    }
+}
+
+template<typename BasicBlock>
+void clearPredecessors(Vector<std::unique_ptr<BasicBlock>>& blocks)
+{
+    for (auto& block : blocks) {
+        if (block)
+            block->predecessors().resize(0);
+    }
+}
+
+template<typename BasicBlock>
+void recomputePredecessors(Vector<std::unique_ptr<BasicBlock>>& blocks)
+{
+    clearPredecessors(blocks);
+    updatePredecessorsAfter(blocks[0].get());
+}
+
+template<typename BasicBlock>
+bool isBlockDead(BasicBlock* block)
+{
+    if (!block)
+        return false;
+    if (!block->index())
+        return false;
+    return block->predecessors().isEmpty();
+}
+
+template<typename BasicBlock>
+Vector<BasicBlock*> blocksInPreOrder(BasicBlock* root)
+{
+    Vector<BasicBlock*> result;
+    GraphNodeWorklist<BasicBlock*> worklist;
+    worklist.push(root);
+    while (BasicBlock* block = worklist.pop()) {
+        result.append(block);
+        for (BasicBlock* successor : block->successorBlocks())
+            worklist.push(successor);
+    }
+    return result;
+}
+
+template<typename BasicBlock>
+Vector<BasicBlock*> blocksInPostOrder(BasicBlock* root)
+{
+    Vector<BasicBlock*> result;
+    PostOrderGraphNodeWorklist<BasicBlock*> worklist;
+    worklist.push(root);
+    while (GraphNodeWithOrder<BasicBlock*> item = worklist.pop()) {
+        switch (item.order) {
+        case GraphVisitOrder::Pre:
+            worklist.pushPost(item.node);
+            for (BasicBlock* successor : item.node->successorBlocks())
+                worklist.push(successor);
+            break;
+        case GraphVisitOrder::Post:
+            result.append(item.node);
+            break;
+        }
+    }
+    return result;
+}
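
blocksInPostOrder() produces a depth-first post order: each block is appended only after the successors explored from it, so the root always comes last, and reversing the result yields a reverse post order suitable for forward dataflow passes. A standalone sketch of that property on a diamond CFG, using plain STL rather than the WTF worklists (purely illustrative):

    #include <cstdio>
    #include <functional>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    int main()
    {
        // Diamond CFG: A -> {B, C}, B -> D, C -> D.
        std::map<char, std::vector<char>> succ = {
            {'A', {'B', 'C'}}, {'B', {'D'}}, {'C', {'D'}}, {'D', {}}};
        std::set<char> seen;
        std::string post;
        std::function<void(char)> dfs = [&](char n) {
            if (!seen.insert(n).second)
                return;
            for (char s : succ[n])
                dfs(s);
            post += n; // a node is appended only after its successors
        };
        dfs('A');
        std::printf("post order: %s\n", post.c_str()); // prints "DBCA"
        return 0;
    }
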
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BlockInsertionSet.cpp b/b3/B3BlockInsertionSet.cpp
new file mode 100644
index 0000000..76a1668
--- /dev/null
+++ b/b3/B3BlockInsertionSet.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3BlockInsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3InsertionSet.h"
+#include "B3ProcedureInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 {
+
+BlockInsertionSet::BlockInsertionSet(Procedure& proc)
+    : m_proc(proc)
+{
+}
+
+BlockInsertionSet::~BlockInsertionSet() { }
+
+void BlockInsertionSet::insert(BlockInsertion&& insertion)
+{
+    m_insertions.append(WTFMove(insertion));
+}
+
+BasicBlock* BlockInsertionSet::insert(unsigned index, double frequency)
+{
+    std::unique_ptr<BasicBlock> block(new BasicBlock(UINT_MAX, frequency));
+    BasicBlock* result = block.get();
+    insert(BlockInsertion(index, WTFMove(block)));
+    return result;
+}
+
+BasicBlock* BlockInsertionSet::insertBefore(BasicBlock* before, double frequency)
+{
+    return insert(before->index(), frequency == frequency ? frequency : before->frequency());
+}
+
+BasicBlock* BlockInsertionSet::insertAfter(BasicBlock* after, double frequency)
+{
+    return insert(after->index() + 1, frequency == frequency ? frequency : after->frequency());
+}
+
+BasicBlock* BlockInsertionSet::splitForward(
+    BasicBlock* block, unsigned& valueIndex, InsertionSet* insertionSet, double frequency)
+{
+    Value* value = block->at(valueIndex);
+
+    // Create a new block that will go just before 'block', and make it contain everything prior
+    // to 'valueIndex'.
+    BasicBlock* result = insertBefore(block, frequency);
+    result->m_values.resize(valueIndex + 1);
+    for (unsigned i = valueIndex; i--;)
+        result->m_values[i] = block->m_values[i];
+
+    // Make the new block jump to 'block'.
+    result->m_values[valueIndex] = m_proc.add<Value>(Jump, value->origin());
+    result->setSuccessors(FrequentedBlock(block));
+
+    // If we had inserted things into 'block' before this, execute those insertions now.
+    if (insertionSet)
+        insertionSet->execute(result);
+
+    // Remove everything prior to 'valueIndex' from 'block', since those things are now in the
+    // new block.
+    block->m_values.remove(0, valueIndex);
+
+    // This is being used in a forward loop over 'block'. Update the index of the loop so that
+    // it can continue to the next block.
+    valueIndex = 0;
+
+    // Fixup the predecessors of 'block'. They now must jump to the new block.
+    result->predecessors() = WTFMove(block->predecessors());
+    block->addPredecessor(result);
+    for (BasicBlock* predecessor : result->predecessors())
+        predecessor->replaceSuccessor(block, result);
+
+    return result;
+}
+
+bool BlockInsertionSet::execute()
+{
+    if (m_insertions.isEmpty())
+        return false;
+    
+    // We allow insertions to be given to us in any order. So, we need to sort them before
+    // running WTF::executeInsertions. We strongly prefer a stable sort and we want it to be
+    // fast, so we use bubble sort.
+    bubbleSort(m_insertions.begin(), m_insertions.end());
+
+    executeInsertions(m_proc.m_blocks, m_insertions);
+    
+    // Prune out empty entries. This isn't strictly necessary but it's
+    // healthy to keep the block list from growing.
+    m_proc.m_blocks.removeAllMatching(
+        [&] (std::unique_ptr<BasicBlock>& blockPtr) -> bool {
+            return !blockPtr;
+        });
+    
+    // Make sure that the blocks know their new indices.
+    for (unsigned i = 0; i < m_proc.m_blocks.size(); ++i)
+        m_proc.m_blocks[i]->m_index = i;
+    
+    return true;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3BlockInsertionSet.h b/b3/B3BlockInsertionSet.h
new file mode 100644
index 0000000..b316f64
--- /dev/null
+++ b/b3/B3BlockInsertionSet.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class InsertionSet;
+
+typedef WTF::Insertion<std::unique_ptr<BasicBlock>> BlockInsertion;
+
+class BlockInsertionSet {
+public:
+    BlockInsertionSet(Procedure&);
+    ~BlockInsertionSet();
+    
+    void insert(BlockInsertion&&);
+
+    // Insert a new block at a given index.
+    BasicBlock* insert(unsigned index, double frequency = PNaN);
+
+    // Inserts a new block before the given block. Usually you will not pass the frequency
+    // argument. Passing PNaN causes us to just use the frequency of the 'before' block. That's
+    // usually what you want.
+    BasicBlock* insertBefore(BasicBlock* before, double frequency = PNaN);
+
+    // Inserts a new block after the given block.
+    BasicBlock* insertAfter(BasicBlock* after, double frequency = PNaN);
+
+    // A helper to split a block when forward iterating over it. It creates a new block to hold
+    // everything before the instruction at valueIndex. The current block is left with
+    // everything at and after valueIndex. If the optional InsertionSet is provided, it will get
+    // executed on the newly created block - this makes sense if you had previously inserted
+    // things into the original block, since the newly created block will be indexed identically
+    // to how this block was indexed for all values prior to valueIndex. After this runs, it sets
+    // valueIndex to zero. This allows you to use this method for things like:
+    //
+    // for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+    //     Value* value = block->at(valueIndex);
+    //     if (value->opcode() == Foo) {
+    //         BasicBlock* predecessor =
+    //             m_blockInsertionSet.splitForward(block, valueIndex, &m_insertionSet);
+    //         ... // Now you can append to predecessor, insert new blocks before 'block', and
+    //         ... // you can use m_insertionSet to insert more things before 'value'.
+    //         predecessor->updatePredecessorsAfter();
+    //     }
+    // }
+    //
+    // Note how usually this idiom ends in a call to updatePredecessorsAfter(), which ensures
+    // that the predecessors involved in any of the new control flow that you've created are up
+    // to date.
+    BasicBlock* splitForward(
+        BasicBlock*, unsigned& valueIndex, InsertionSet* = nullptr,
+        double frequency = PNaN);
+    
+    bool execute();
+
+private:
+    Procedure& m_proc;
+    Vector<BlockInsertion> m_insertions;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BlockWorklist.h b/b3/B3BlockWorklist.h
new file mode 100644
index 0000000..6fa197c
--- /dev/null
+++ b/b3/B3BlockWorklist.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+
+typedef GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> BlockWorklist;
+
+// When you say BlockWith<int> you should read it as "block with an int".
+template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>;
+
+// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
+// permits forcibly enqueueing things even if the block has already been seen. It's useful for
+// things like building a spanning tree, in which case T (the auxiliary payload) would be the
+// successor index.
+template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, IndexSet<BasicBlock>>;
+
+typedef GraphVisitOrder VisitOrder;
+
+typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder;
+
+typedef PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> PostOrderBlockWorklist;
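+
+// A hedged sketch of how the post-order worklist is typically driven, in the same style as a
+// post-order traversal helper; 'root' is assumed to be the procedure's entry block.
+//
+//     PostOrderBlockWorklist worklist;
+//     worklist.push(root);
+//     while (BlockWithOrder item = worklist.pop()) {
+//         if (item.order == VisitOrder::Pre) {
+//             worklist.pushPost(item.node);
+//             for (BasicBlock* successor : item.node->successorBlocks())
+//                 worklist.push(successor);
+//         } else
+//             dataLog("post-order visit of ", *item.node, "\n");
+//     }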
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BottomProvider.h b/b3/B3BottomProvider.h
new file mode 100644
index 0000000..9a977f0
--- /dev/null
+++ b/b3/B3BottomProvider.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3InsertionSet.h"
+
+namespace JSC { namespace B3 {
+
+// This exists because we cannot convert values to constants in-place.
+// FIXME: https://bugs.webkit.org/show_bug.cgi?id=159119
+
+class BottomProvider {
+public:
+    BottomProvider(InsertionSet& insertionSet, size_t index)
+        : m_insertionSet(&insertionSet)
+        , m_index(index)
+    {
+    }
+    
+    Value* operator()(Origin origin, Type type) const
+    {
+        return m_insertionSet->insertBottom(m_index, origin, type);
+    }
+    
+private:
+    InsertionSet* m_insertionSet;
+    size_t m_index;
+};
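+
+// A hedged usage sketch (names are illustrative): given the InsertionSet a pass is already
+// using and the index of the value currently being visited, a BottomProvider lets shared code
+// ask for a zero-like value of any type without knowing about the insertion machinery.
+//
+//     BottomProvider bottom(insertionSet, valueIndex);
+//     Value* replacement = bottom(value->origin(), value->type());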
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3BreakCriticalEdges.cpp b/b3/B3BreakCriticalEdges.cpp
new file mode 100644
index 0000000..abdf0ce
--- /dev/null
+++ b/b3/B3BreakCriticalEdges.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3BreakCriticalEdges.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+void breakCriticalEdges(Procedure& proc)
+{
+    BlockInsertionSet insertionSet(proc);
+    
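+    // A critical edge runs from a block with more than one successor to a block with more than
+    // one predecessor. Splitting each such edge with an empty "pad" block gives every edge its
+    // own block, so later phases can insert code on an edge without affecting other paths.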
+    for (BasicBlock* block : proc) {
+        if (block->numSuccessors() <= 1)
+            continue;
+
+        for (BasicBlock*& successor : block->successorBlocks()) {
+            if (successor->numPredecessors() <= 1)
+                continue;
+
+            BasicBlock* pad =
+                insertionSet.insertBefore(successor, successor->frequency());
+            pad->appendNew<Value>(proc, Jump, successor->at(0)->origin());
+            pad->setSuccessors(FrequentedBlock(successor));
+            pad->addPredecessor(block);
+            successor->replacePredecessor(block, pad);
+            successor = pad;
+        }
+    }
+
+    insertionSet.execute();
+    proc.invalidateCFG();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3BreakCriticalEdges.h b/b3/B3BreakCriticalEdges.h
new file mode 100644
index 0000000..75c324f
--- /dev/null
+++ b/b3/B3BreakCriticalEdges.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+void breakCriticalEdges(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CCallValue.cpp b/b3/B3CCallValue.cpp
new file mode 100644
index 0000000..518d723
--- /dev/null
+++ b/b3/B3CCallValue.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CCallValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+CCallValue::~CCallValue()
+{
+}
+
+Value* CCallValue::cloneImpl() const
+{
+    return new CCallValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3CCallValue.h b/b3/B3CCallValue.h
new file mode 100644
index 0000000..44ec349
--- /dev/null
+++ b/b3/B3CCallValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Effects.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE CCallValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == CCall; }
+
+    ~CCallValue();
+
+    Effects effects;
+
+protected:
+    Value* cloneImpl() const override;
+    
+private:
+    friend class Procedure;
+
+    template<typename... Arguments>
+    CCallValue(Type type, Origin origin, Arguments... arguments)
+        : Value(CheckedOpcode, CCall, type, origin, arguments...)
+        , effects(Effects::forCall())
+    {
+        RELEASE_ASSERT(numChildren() >= 1);
+    }
+
+    template<typename... Arguments>
+    CCallValue(Type type, Origin origin, const Effects& effects, Arguments... arguments)
+        : Value(CheckedOpcode, CCall, type, origin, arguments...)
+        , effects(effects)
+    {
+        RELEASE_ASSERT(numChildren() >= 1);
+    }
+};
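+
+// A hedged usage sketch (assuming 'proc' is the Procedure, 'block' a BasicBlock*, 'callee' a
+// Value* holding the function address, and 'argument' some Value*): by convention the callee is
+// the first child, and the default effects are Effects::forCall().
+//
+//     Value* result = block->appendNew<CCallValue>(
+//         proc, Int32, Origin(), callee, argument);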
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CFG.h b/b3/B3CFG.h
new file mode 100644
index 0000000..3d1418e
--- /dev/null
+++ b/b3/B3CFG.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3Procedure.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+class CFG {
+    WTF_MAKE_NONCOPYABLE(CFG);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    typedef BasicBlock* Node;
+    typedef IndexSet<BasicBlock> Set;
+    template<typename T> using Map = IndexMap<BasicBlock, T>;
+    typedef Vector<BasicBlock*> List;
+
+    CFG(Procedure& proc)
+        : m_proc(proc)
+    {
+    }
+
+    Node root() { return m_proc[0]; }
+
+    template<typename T>
+    Map<T> newMap() { return IndexMap<BasicBlock, T>(m_proc.size()); }
+
+    SuccessorCollection<BasicBlock, BasicBlock::SuccessorList> successors(Node node) { return node->successorBlocks(); }
+    BasicBlock::PredecessorList& predecessors(Node node) { return node->predecessors(); }
+
+    unsigned index(Node node) const { return node->index(); }
+    Node node(unsigned index) const { return m_proc[index]; }
+    unsigned numNodes() const { return m_proc.size(); }
+
+    PointerDump<BasicBlock> dump(Node node) const { return pointerDump(node); }
+
+    void dump(PrintStream& out) const
+    {
+        m_proc.dump(out);
+    }
+
+private:
+    Procedure& m_proc;
+};
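+
+// A hedged sketch of why this adapter exists: generic graph algorithms can be written against
+// the Node/Set/Map interface and instantiated with CFG. For example, a reachability walk that
+// touches nothing B3-specific (assuming 'cfg' is a CFG built over some Procedure):
+//
+//     CFG::Set reached;
+//     Vector<CFG::Node> worklist;
+//     reached.add(cfg.root());
+//     worklist.append(cfg.root());
+//     while (!worklist.isEmpty()) {
+//         CFG::Node node = worklist.takeLast();
+//         for (CFG::Node successor : cfg.successors(node)) {
+//             if (reached.add(successor))
+//                 worklist.append(successor);
+//         }
+//     }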
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CaseCollection.cpp b/b3/B3CaseCollection.cpp
new file mode 100644
index 0000000..5221eba
--- /dev/null
+++ b/b3/B3CaseCollection.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CaseCollection.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 {
+
+void CaseCollection::dump(PrintStream& out) const
+{
+    CommaPrinter comma;
+    for (SwitchCase switchCase : *this)
+        out.print(comma, switchCase);
+    out.print(comma, "default->", fallThrough());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3CaseCollection.h b/b3/B3CaseCollection.h
new file mode 100644
index 0000000..c45cc64
--- /dev/null
+++ b/b3/B3CaseCollection.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SwitchCase.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class SwitchValue;
+
+// NOTE: You'll always want to include B3CaseCollectionInlines.h when you use this.
+
+class CaseCollection {
+public:
+    CaseCollection()
+    {
+    }
+    
+    CaseCollection(const SwitchValue* terminal, const BasicBlock* owner)
+        : m_switch(terminal)
+        , m_owner(owner)
+    {
+    }
+    
+    const FrequentedBlock& fallThrough() const;
+
+    unsigned size() const;
+    SwitchCase at(unsigned index) const;
+    
+    SwitchCase operator[](unsigned index) const
+    {
+        return at(index);
+    }
+
+    class iterator {
+    public:
+        iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(const CaseCollection& collection, unsigned index)
+            : m_collection(&collection)
+            , m_index(index)
+        {
+        }
+
+        SwitchCase operator*()
+        {
+            return m_collection->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index++;
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        const CaseCollection* m_collection;
+        unsigned m_index;
+    };
+
+    typedef iterator const_iterator;
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    const SwitchValue* m_switch { nullptr };
+    const BasicBlock* m_owner { nullptr };
+};
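+
+// A hedged usage sketch (assuming 'terminal' is the block's SwitchValue* and 'block' is the
+// BasicBlock* that owns it); with B3CaseCollectionInlines.h included, iteration is trivial:
+//
+//     CaseCollection cases(terminal, block);
+//     for (SwitchCase switchCase : cases)
+//         dataLog(switchCase, "\n");
+//     dataLog("default -> ", cases.fallThrough(), "\n");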
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CaseCollectionInlines.h b/b3/B3CaseCollectionInlines.h
new file mode 100644
index 0000000..237a568
--- /dev/null
+++ b/b3/B3CaseCollectionInlines.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CaseCollection.h"
+#include "B3SwitchValue.h"
+#include "B3BasicBlock.h"
+
+namespace JSC { namespace B3 {
+
+inline const FrequentedBlock& CaseCollection::fallThrough() const
+{
+    return m_owner->fallThrough();
+}
+
+inline unsigned CaseCollection::size() const
+{
+    return m_switch->numCaseValues();
+}
+
+inline SwitchCase CaseCollection::at(unsigned index) const
+{
+    return SwitchCase(m_switch->caseValue(index), m_owner->successor(index));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CheckSpecial.cpp b/b3/B3CheckSpecial.cpp
new file mode 100644
index 0000000..6f7826c
--- /dev/null
+++ b/b3/B3CheckSpecial.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CheckSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "AirInstInlines.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+namespace {
+
+unsigned numB3Args(B3::Kind kind)
+{
+    switch (kind.opcode()) {
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return 2;
+    case Check:
+        return 1;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
+unsigned numB3Args(Value* value)
+{
+    return numB3Args(value->kind());
+}
+
+unsigned numB3Args(Inst& inst)
+{
+    return numB3Args(inst.origin);
+}
+
+} // anonymous namespace
+
+CheckSpecial::Key::Key(const Inst& inst)
+{
+    m_kind = inst.kind;
+    m_numArgs = inst.args.size();
+    m_stackmapRole = SameAsRep;
+}
+
+void CheckSpecial::Key::dump(PrintStream& out) const
+{
+    out.print(m_kind, "(", m_numArgs, ",", m_stackmapRole, ")");
+}
+
+CheckSpecial::CheckSpecial(Air::Kind kind, unsigned numArgs, RoleMode stackmapRole)
+    : m_checkKind(kind)
+    , m_stackmapRole(stackmapRole)
+    , m_numCheckArgs(numArgs)
+{
+    ASSERT(isDefinitelyTerminal(kind.opcode));
+}
+
+CheckSpecial::CheckSpecial(const CheckSpecial::Key& key)
+    : CheckSpecial(key.kind(), key.numArgs(), key.stackmapRole())
+{
+}
+
+CheckSpecial::~CheckSpecial()
+{
+}
+
+Inst CheckSpecial::hiddenBranch(const Inst& inst) const
+{
+    Inst hiddenBranch(m_checkKind, inst.origin);
+    hiddenBranch.args.reserveInitialCapacity(m_numCheckArgs);
+    for (unsigned i = 0; i < m_numCheckArgs; ++i)
+        hiddenBranch.args.append(inst.args[i + 1]);
+    ASSERT(hiddenBranch.isTerminal());
+    return hiddenBranch;
+}
+
+void CheckSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    Inst hidden = hiddenBranch(inst);
+    hidden.forEachArg(
+        [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+            unsigned index = &arg - &hidden.args[0];
+            callback(inst.args[1 + index], role, type, width);
+        });
+
+    std::optional<unsigned> firstRecoverableIndex;
+    if (m_checkKind.opcode == BranchAdd32 || m_checkKind.opcode == BranchAdd64)
+        firstRecoverableIndex = 1;
+    forEachArgImpl(numB3Args(inst), m_numCheckArgs + 1, inst, m_stackmapRole, firstRecoverableIndex, callback);
+}
+
+bool CheckSpecial::isValid(Inst& inst)
+{
+    return hiddenBranch(inst).isValidForm()
+        && isValidImpl(numB3Args(inst), m_numCheckArgs + 1, inst)
+        && inst.args.size() - m_numCheckArgs - 1 == inst.origin->numChildren() - numB3Args(inst);
+}
+
+bool CheckSpecial::admitsStack(Inst& inst, unsigned argIndex)
+{
+    if (argIndex >= 1 && argIndex < 1 + m_numCheckArgs)
+        return hiddenBranch(inst).admitsStack(argIndex - 1);
+    return admitsStackImpl(numB3Args(inst), m_numCheckArgs + 1, inst, argIndex);
+}
+
+std::optional<unsigned> CheckSpecial::shouldTryAliasingDef(Inst& inst)
+{
+    if (std::optional<unsigned> branchDef = hiddenBranch(inst).shouldTryAliasingDef())
+        return *branchDef + 1;
+    return std::nullopt;
+}
+
+CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
+{
+    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
+    ASSERT(fail.isSet());
+
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    Vector<ValueRep> reps = repsImpl(context, numB3Args(inst), m_numCheckArgs + 1, inst);
+
+    // Set aside the args that are relevant to undoing the operation. This is because we don't want to
+    // capture all of inst in the closure below.
+    Vector<Arg> args;
+    for (unsigned i = 0; i < m_numCheckArgs; ++i)
+        args.append(inst.args[1 + i]);
+
+    context.latePaths.append(
+        createSharedTask<void(CCallHelpers&, GenerationContext&)>(
+            [=] (CCallHelpers& jit, GenerationContext& context) {
+                fail.link(&jit);
+
+                // If necessary, undo the operation.
+                switch (m_checkKind.opcode) {
+                case BranchAdd32:
+                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
+                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
+                        // This is ugly, but that's fine - we won't have to do this very often.
+                        ASSERT(args[1].isGPR());
+                        GPRReg valueGPR = args[1].gpr();
+                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
+                        jit.pushToSave(scratchGPR);
+                        jit.setCarry(scratchGPR);
+                        jit.lshift32(CCallHelpers::TrustedImm32(31), scratchGPR);
+                        jit.urshift32(CCallHelpers::TrustedImm32(1), valueGPR);
+                        jit.or32(scratchGPR, valueGPR);
+                        jit.popToRestore(scratchGPR);
+                        break;
+                    }
+                    if (m_numCheckArgs == 4) {
+                        if (args[1] == args[3])
+                            Inst(Sub32, nullptr, args[2], args[3]).generate(jit, context);
+                        else if (args[2] == args[3])
+                            Inst(Sub32, nullptr, args[1], args[3]).generate(jit, context);
+                    } else if (m_numCheckArgs == 3)
+                        Inst(Sub32, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchAdd64:
+                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
+                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
+                        // This is ugly, but that's fine - we won't have to do this very often.
+                        ASSERT(args[1].isGPR());
+                        GPRReg valueGPR = args[1].gpr();
+                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
+                        jit.pushToSave(scratchGPR);
+                        jit.setCarry(scratchGPR);
+                        jit.lshift64(CCallHelpers::TrustedImm32(63), scratchGPR);
+                        jit.urshift64(CCallHelpers::TrustedImm32(1), valueGPR);
+                        jit.or64(scratchGPR, valueGPR);
+                        jit.popToRestore(scratchGPR);
+                        break;
+                    }
+                    if (m_numCheckArgs == 4) {
+                        if (args[1] == args[3])
+                            Inst(Sub64, nullptr, args[2], args[3]).generate(jit, context);
+                        else if (args[2] == args[3])
+                            Inst(Sub64, nullptr, args[1], args[3]).generate(jit, context);
+                    } else if (m_numCheckArgs == 3)
+                        Inst(Sub64, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchSub32:
+                    Inst(Add32, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchSub64:
+                    Inst(Add64, nullptr, args[1], args[2]).generate(jit, context);
+                    break;
+                case BranchNeg32:
+                    Inst(Neg32, nullptr, args[1]).generate(jit, context);
+                    break;
+                case BranchNeg64:
+                    Inst(Neg64, nullptr, args[1]).generate(jit, context);
+                    break;
+                default:
+                    break;
+                }
+                
+                value->m_generator->run(jit, StackmapGenerationParams(value, reps, context));
+            }));
+
+    return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
+}
+
+void CheckSpecial::dumpImpl(PrintStream& out) const
+{
+    out.print(m_checkKind, "(", m_numCheckArgs, ",", m_stackmapRole, ")");
+}
+
+void CheckSpecial::deepDumpImpl(PrintStream& out) const
+{
+    out.print("B3::CheckValue lowered to ", m_checkKind, " with ", m_numCheckArgs, " args.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CheckSpecial.h b/b3/B3CheckSpecial.h
new file mode 100644
index 0000000..aa7f2fe
--- /dev/null
+++ b/b3/B3CheckSpecial.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirKind.h"
+#include "B3StackmapSpecial.h"
+#include <wtf/HashMap.h>
+
+namespace JSC { namespace B3 {
+
+namespace Air {
+struct Inst;
+}
+
+// We want to lower Check instructions to a branch, but then we want to route that branch to our
+// out-of-line code instead of doing anything else. For this reason, a CheckSpecial will remember
+// which branch opcode we have selected along with the number of args in the overload we want. It
+// will create an Inst with that opcode plus the appropriate args from the owning Inst whenever you
+// call any of the callbacks.
+//
+// Note that for CheckAdd, CheckSub, and CheckMul we expect that the B3 arguments are the reverse
+// of the Air arguments (Add(a, b) => Add32 b, a). Except:
+// - CheckSub(0, x), which turns into BranchNeg32 x.
+// - CheckMul(a, b), which turns into Mul32 b, a but we pass Any for a's ValueRep.
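+//
+// A hedged illustration of the hash-consing idea (this is not the actual lowering code;
+// 'specials' is a hypothetical cache owned by the lowering phase): one CheckSpecial can be
+// shared by every check that lowers to the same branch kind and argument count.
+//
+//     HashMap<CheckSpecial::Key, CheckSpecial*> specials;
+//     auto result = specials.add(CheckSpecial::Key(kind, numArgs), nullptr);
+//     if (result.isNewEntry)
+//         result.iterator->value = new CheckSpecial(kind, numArgs);
+//     CheckSpecial* special = result.iterator->value;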
+
+class CheckSpecial : public StackmapSpecial {
+public:
+    // Support for hash consing these things.
+    class Key {
+    public:
+        Key()
+            : m_stackmapRole(SameAsRep)
+            , m_numArgs(0)
+        {
+        }
+        
+        Key(Air::Kind kind, unsigned numArgs, RoleMode stackmapRole = SameAsRep)
+            : m_kind(kind)
+            , m_stackmapRole(stackmapRole)
+            , m_numArgs(numArgs)
+        {
+        }
+
+        explicit Key(const Air::Inst&);
+
+        bool operator==(const Key& other) const
+        {
+            return m_kind == other.m_kind
+                && m_numArgs == other.m_numArgs
+                && m_stackmapRole == other.m_stackmapRole;
+        }
+
+        bool operator!=(const Key& other) const
+        {
+            return !(*this == other);
+        }
+
+        explicit operator bool() const { return *this != Key(); }
+
+        Air::Kind kind() const { return m_kind; }
+        unsigned numArgs() const { return m_numArgs; }
+        RoleMode stackmapRole() const { return m_stackmapRole; }
+
+        void dump(PrintStream& out) const;
+
+        Key(WTF::HashTableDeletedValueType)
+            : m_stackmapRole(SameAsRep)
+            , m_numArgs(1)
+        {
+        }
+
+        bool isHashTableDeletedValue() const
+        {
+            return *this == Key(WTF::HashTableDeletedValue);
+        }
+
+        unsigned hash() const
+        {
+            // Seriously, we don't need to be smart here. It just doesn't matter.
+            return m_kind.hash() + m_numArgs + m_stackmapRole;
+        }
+        
+    private:
+        Air::Kind m_kind;
+        RoleMode m_stackmapRole;
+        unsigned m_numArgs;
+    };
+    
+    CheckSpecial(Air::Kind, unsigned numArgs, RoleMode stackmapRole = SameAsRep);
+    CheckSpecial(const Key&);
+    ~CheckSpecial();
+
+protected:
+    // Constructs and returns the Inst representing the branch that this will use.
+    Air::Inst hiddenBranch(const Air::Inst&) const;
+
+    void forEachArg(Air::Inst&, const ScopedLambda<Air::Inst::EachArgCallback>&) override;
+    bool isValid(Air::Inst&) override;
+    bool admitsStack(Air::Inst&, unsigned argIndex) override;
+    std::optional<unsigned> shouldTryAliasingDef(Air::Inst&) override;
+
+    // NOTE: the generate method will generate the hidden branch and then register a LatePath that
+    // generates the stackmap. Super crazy dude!
+
+    CCallHelpers::Jump generate(Air::Inst&, CCallHelpers&, Air::GenerationContext&) override;
+
+    void dumpImpl(PrintStream&) const override;
+    void deepDumpImpl(PrintStream&) const override;
+
+private:
+    Air::Kind m_checkKind;
+    RoleMode m_stackmapRole;
+    unsigned m_numCheckArgs;
+};
+
+struct CheckSpecialKeyHash {
+    static unsigned hash(const CheckSpecial::Key& key) { return key.hash(); }
+    static bool equal(const CheckSpecial::Key& a, const CheckSpecial::Key& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::CheckSpecial::Key> {
+    typedef JSC::B3::CheckSpecialKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::CheckSpecial::Key> : SimpleClassHashTraits<JSC::B3::CheckSpecial::Key> {
+    // I don't want to think about this very hard, it's not worth it. I'm a be conservative.
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CheckValue.cpp b/b3/B3CheckValue.cpp
new file mode 100644
index 0000000..79b6c6e
--- /dev/null
+++ b/b3/B3CheckValue.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3CheckValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+CheckValue::~CheckValue()
+{
+}
+
+void CheckValue::convertToAdd()
+{
+    RELEASE_ASSERT(opcode() == CheckAdd || opcode() == CheckSub || opcode() == CheckMul);
+    m_kind = CheckAdd;
+}
+
+Value* CheckValue::cloneImpl() const
+{
+    return new CheckValue(*this);
+}
+
+// Use this form for CheckAdd, CheckSub, and CheckMul.
+CheckValue::CheckValue(Kind kind, Origin origin, Value* left, Value* right)
+    : StackmapValue(CheckedOpcode, kind, left->type(), origin)
+{
+    ASSERT(B3::isInt(type()));
+    ASSERT(left->type() == right->type());
+    ASSERT(kind == CheckAdd || kind == CheckSub || kind == CheckMul);
+    append(ConstrainedValue(left, ValueRep::WarmAny));
+    append(ConstrainedValue(right, ValueRep::WarmAny));
+}
+
+// Use this form for Check.
+CheckValue::CheckValue(Kind kind, Origin origin, Value* predicate)
+    : StackmapValue(CheckedOpcode, kind, Void, origin)
+{
+    ASSERT(kind == Check);
+    append(ConstrainedValue(predicate, ValueRep::WarmAny));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3CheckValue.h b/b3/B3CheckValue.h
new file mode 100644
index 0000000..e3d94ba
--- /dev/null
+++ b/b3/B3CheckValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackmapValue.h"
+
+namespace JSC { namespace B3 {
+
+class CheckValue : public StackmapValue {
+public:
+    static bool accepts(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul:
+        case Check:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    ~CheckValue();
+
+    void convertToAdd();
+
+protected:
+    Value* cloneImpl() const override;
+    
+private:
+    friend class Procedure;
+
+    // Use this form for CheckAdd, CheckSub, and CheckMul.
+    JS_EXPORT_PRIVATE CheckValue(Kind, Origin, Value* left, Value* right);
+
+    // Use this form for Check.
+    JS_EXPORT_PRIVATE CheckValue(Kind, Origin, Value* predicate);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Common.cpp b/b3/B3Common.cpp
new file mode 100644
index 0000000..60da362
--- /dev/null
+++ b/b3/B3Common.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Common.h"
+
+#if ENABLE(B3_JIT)
+
+#include "DFGCommon.h"
+#include "FTLState.h"
+#include "Options.h"
+
+namespace JSC { namespace B3 {
+
+bool shouldDumpIR(B3ComplitationMode mode)
+{
+#if ENABLE(FTL_JIT)
+    return FTL::verboseCompilationEnabled() || FTL::shouldDumpDisassembly() || shouldDumpIRAtEachPhase(mode);
+#else
+    return shouldDumpIRAtEachPhase(mode);
+#endif
+}
+
+bool shouldDumpIRAtEachPhase(B3ComplitationMode mode)
+{
+    if (mode == B3Mode)
+        return Options::dumpGraphAtEachPhase() || Options::dumpB3GraphAtEachPhase();
+    return Options::dumpGraphAtEachPhase() || Options::dumpAirGraphAtEachPhase();
+}
+
+bool shouldValidateIR()
+{
+    return DFG::validationEnabled() || shouldValidateIRAtEachPhase();
+}
+
+bool shouldValidateIRAtEachPhase()
+{
+    return Options::validateGraphAtEachPhase();
+}
+
+bool shouldSaveIRBeforePhase()
+{
+    return Options::verboseValidationFailure();
+}
+
+bool shouldMeasurePhaseTiming()
+{
+    return Options::logB3PhaseTimes();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Common.h b/b3/B3Common.h
new file mode 100644
index 0000000..41e8ee0
--- /dev/null
+++ b/b3/B3Common.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "JSExportMacros.h"
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace B3 {
+
+inline bool is64Bit() { return sizeof(void*) == 8; }
+inline bool is32Bit() { return !is64Bit(); }
+
+enum B3ComplitationMode {
+    B3Mode,
+    AirMode
+};
+
+JS_EXPORT_PRIVATE bool shouldDumpIR(B3ComplitationMode);
+bool shouldDumpIRAtEachPhase(B3ComplitationMode);
+bool shouldValidateIR();
+bool shouldValidateIRAtEachPhase();
+bool shouldSaveIRBeforePhase();
+bool shouldMeasurePhaseTiming();
+
+template<typename InputType, typename BitsType>
+inline bool isIdentical(InputType left, InputType right)
+{
+    BitsType leftBits = bitwise_cast<BitsType>(left);
+    BitsType rightBits = bitwise_cast<BitsType>(right);
+    return leftBits == rightBits;
+}
+
+inline bool isIdentical(int32_t left, int32_t right)
+{
+    return isIdentical<int32_t, int32_t>(left, right);
+}
+
+inline bool isIdentical(int64_t left, int64_t right)
+{
+    return isIdentical<int64_t, int64_t>(left, right);
+}
+
+inline bool isIdentical(double left, double right)
+{
+    return isIdentical<double, int64_t>(left, right);
+}
+
+inline bool isIdentical(float left, float right)
+{
+    return isIdentical<float, int32_t>(left, right);
+}
+
+template<typename ResultType, typename InputType>
+inline bool isRepresentableAsImpl(InputType originalValue)
+{
+    // Convert the original value to the desired result type.
+    ResultType result = static_cast<ResultType>(originalValue);
+
+    // Convert the converted value back to the original type. The original value is representable
+    // using the new type if such round-tripping doesn't lose bits.
+    InputType newValue = static_cast<InputType>(result);
+
+    return isIdentical(originalValue, newValue);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(int32_t value)
+{
+    return isRepresentableAsImpl<ResultType, int32_t>(value);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(int64_t value)
+{
+    return isRepresentableAsImpl<ResultType, int64_t>(value);
+}
+
+template<typename ResultType>
+inline bool isRepresentableAs(double value)
+{
+    return isRepresentableAsImpl<ResultType, double>(value);
+}
+
+template<typename IntType>
+static IntType chillDiv(IntType numerator, IntType denominator)
+{
+    if (!denominator)
+        return 0;
+    if (denominator == -1 && numerator == std::numeric_limits<IntType>::min())
+        return std::numeric_limits<IntType>::min();
+    return numerator / denominator;
+}
+
+template<typename IntType>
+static IntType chillMod(IntType numerator, IntType denominator)
+{
+    if (!denominator)
+        return 0;
+    if (denominator == -1 && numerator == std::numeric_limits<IntType>::min())
+        return 0;
+    return numerator % denominator;
+}
+
+template<typename IntType>
+static IntType chillUDiv(IntType numerator, IntType denominator)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType unsignedNumerator = static_cast<UnsignedIntType>(numerator);
+    UnsignedIntType unsignedDenominator = static_cast<UnsignedIntType>(denominator);
+    if (!unsignedDenominator)
+        return 0;
+    return unsignedNumerator / unsignedDenominator;
+}
+
+template<typename IntType>
+static IntType chillUMod(IntType numerator, IntType denominator)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType unsignedNumerator = static_cast<UnsignedIntType>(numerator);
+    UnsignedIntType unsignedDenominator = static_cast<UnsignedIntType>(denominator);
+    if (!unsignedDenominator)
+        return 0;
+    return unsignedNumerator % unsignedDenominator;
+}
+
+template<typename IntType>
+static IntType rotateRight(IntType value, int32_t shift)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType uValue = static_cast<UnsignedIntType>(value);
+    int32_t bits = sizeof(IntType) * 8;
+    int32_t mask = bits - 1;
+    shift &= mask;
+    return (uValue >> shift) | (uValue << ((bits - shift) & mask));
+}
+
+template<typename IntType>
+static IntType rotateLeft(IntType value, int32_t shift)
+{
+    typedef typename std::make_unsigned<IntType>::type UnsignedIntType;
+    UnsignedIntType uValue = static_cast<UnsignedIntType>(value);
+    int32_t bits = sizeof(IntType) * 8;
+    int32_t mask = bits - 1;
+    shift &= mask;
+    return (uValue << shift) | (uValue >> ((bits - shift) & mask));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
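
The chill* helpers above are what give B3's Div and Mod their defined behavior on the two operand patterns that are undefined for C++'s built-in operators, and isRepresentableAs<> is a round-trip test. A minimal sketch of those semantics (illustration only, not part of the patch; assumes it is compiled inside the JSC::B3 namespace with <limits> available):

    int32_t intMin = std::numeric_limits<int32_t>::min();
    chillDiv<int32_t>(42, 0);        // == 0: division by zero yields zero instead of trapping
    chillDiv<int32_t>(intMin, -1);   // == INT32_MIN: the one signed-overflow case is pinned
    chillMod<int32_t>(intMin, -1);   // == 0
    chillUDiv<int32_t>(-2, 3);       // operands are reinterpreted as uint32_t before dividing
    isRepresentableAs<int32_t>(int64_t(1) << 40); // false: the round trip drops bits
    isRepresentableAs<int32_t>(7.0);              // true: 7.0 -> 7 -> 7.0 is lossless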
diff --git a/b3/B3Commutativity.cpp b/b3/B3Commutativity.cpp
new file mode 100644
index 0000000..5de43e6
--- /dev/null
+++ b/b3/B3Commutativity.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Commutativity.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Commutativity commutativity)
+{
+    switch (commutativity) {
+    case Commutative:
+        out.print("Commutative");
+        return;
+    case NotCommutative:
+        out.print("NotCommutative");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Commutativity.h b/b3/B3Commutativity.h
new file mode 100644
index 0000000..bf0de75
--- /dev/null
+++ b/b3/B3Commutativity.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+enum Commutativity {
+    Commutative,
+    NotCommutative
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::B3::Commutativity);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
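
A sketch (illustration only, not part of the patch) of what this enum is for: a simplification pass may reorder operands only when the opcode reports Commutative, for example to keep constants on the right so later pattern matching sees one canonical shape. The child() accessor and hasInt() below are assumed from B3Value.h:

    // Hypothetical pass fragment.
    void canonicalizeOperands(Value* value, Commutativity commutativity)
    {
        if (commutativity != Commutative)
            return;
        // Legal only because the operation commutes: move a constant operand to the right.
        if (value->child(0)->hasInt() && !value->child(1)->hasInt())
            std::swap(value->child(0), value->child(1));
    }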
diff --git a/b3/B3Compilation.cpp b/b3/B3Compilation.cpp
new file mode 100644
index 0000000..9e20a6b
--- /dev/null
+++ b/b3/B3Compilation.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Compilation.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproducts.h"
+#include "CCallHelpers.h"
+
+namespace JSC { namespace B3 {
+
+Compilation::Compilation(MacroAssemblerCodeRef codeRef, std::unique_ptr<OpaqueByproducts> byproducts)
+    : m_codeRef(codeRef)
+    , m_byproducts(WTFMove(byproducts))
+{
+}
+
+Compilation::Compilation(Compilation&& other)
+    : m_codeRef(WTFMove(other.m_codeRef))
+    , m_byproducts(WTFMove(other.m_byproducts))
+{
+}
+
+Compilation::~Compilation()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Compilation.h b/b3/B3Compilation.h
new file mode 100644
index 0000000..7398652
--- /dev/null
+++ b/b3/B3Compilation.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class VM;
+
+namespace B3 {
+
+class OpaqueByproducts;
+class Procedure;
+
+// This class is a way to keep the result of a B3 compilation alive
+// and runnable.
+
+class Compilation {
+    WTF_MAKE_NONCOPYABLE(Compilation);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    JS_EXPORT_PRIVATE Compilation(MacroAssemblerCodeRef, std::unique_ptr<OpaqueByproducts>);
+    JS_EXPORT_PRIVATE Compilation(Compilation&&);
+    JS_EXPORT_PRIVATE ~Compilation();
+
+    MacroAssemblerCodePtr code() const { return m_codeRef.code(); }
+    MacroAssemblerCodeRef codeRef() const { return m_codeRef; }
+    
+    CString disassembly() const { return m_codeRef.disassembly(); }
+
+private:
+    MacroAssemblerCodeRef m_codeRef;
+    std::unique_ptr<OpaqueByproducts> m_byproducts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Compile.cpp b/b3/B3Compile.cpp
new file mode 100644
index 0000000..980390a
--- /dev/null
+++ b/b3/B3Compile.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Compile.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Generate.h"
+#include "B3OpaqueByproducts.h"
+#include "B3Procedure.h"
+#include "B3TimingScope.h"
+#include "CCallHelpers.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace B3 {
+
+Compilation compile(VM& vm, Procedure& proc, unsigned optLevel)
+{
+    TimingScope timingScope("Compilation");
+    
+    prepareForGeneration(proc, optLevel);
+    
+    CCallHelpers jit(&vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(vm, jit, nullptr);
+
+    return Compilation(FINALIZE_CODE(linkBuffer, ("B3::Compilation")), proc.releaseByproducts());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Compile.h b/b3/B3Compile.h
new file mode 100644
index 0000000..37db160
--- /dev/null
+++ b/b3/B3Compile.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Compilation.h"
+
+namespace JSC {
+
+class VM;
+
+namespace B3 {
+
+class Procedure;
+
+// This is a fool-proof API for compiling a Procedure to code and then running that code. You compile
+// a Procedure using this API by doing:
+//
+// Compilation compilation = B3::compile(vm, proc);
+//
+// Then you keep the Compilation object alive for as long as you want to be able to run the code.
+// If this API feels too high-level, you can use B3::generate() directly.
+
+JS_EXPORT_PRIVATE Compilation compile(VM&, Procedure&, unsigned optLevel = 1);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
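
Expanding the usage comment above into a slightly fuller sketch (illustration only; the IR-building calls are elided because they come from B3Procedure.h rather than this file, and the cast of the entry point is an assumption about how a caller would use MacroAssemblerCodePtr):

    Procedure proc;
    // ... populate proc with basic blocks and values ...

    Compilation compilation = compile(vm, proc); // vm is an existing JSC::VM&

    // The Compilation owns the executable memory, so it must outlive every call into the code.
    auto entry = bitwise_cast<int64_t (*)(int64_t)>(compilation.code().executableAddress());
    int64_t result = entry(42);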
diff --git a/b3/B3ComputeDivisionMagic.h b/b3/B3ComputeDivisionMagic.h
new file mode 100644
index 0000000..8c17ed6
--- /dev/null
+++ b/b3/B3ComputeDivisionMagic.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * This contains code taken from LLVM's APInt class. That code implements finding the magic
+ * numbers for strength-reducing division. The LLVM code on which this code is based was
+ * implemented using "Hacker's Delight", Henry S. Warren, Jr., chapter 10.
+ *
+ * ==============================================================================
+ * LLVM Release License
+ * ==============================================================================
+ * University of Illinois/NCSA
+ * Open Source License
+ * 
+ * Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
+ * All rights reserved.
+ * 
+ * Developed by:
+ * 
+ *     LLVM Team
+ * 
+ *     University of Illinois at Urbana-Champaign
+ * 
+ *     http://llvm.org
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal with
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ * 
+ *     * Redistributions of source code must retain the above copyright notice,
+ *       this list of conditions and the following disclaimers.
+ * 
+ *     * Redistributions in binary form must reproduce the above copyright notice,
+ *       this list of conditions and the following disclaimers in the
+ *       documentation and/or other materials provided with the distribution.
+ * 
+ *     * Neither the names of the LLVM Team, University of Illinois at
+ *       Urbana-Champaign, nor the names of its contributors may be used to
+ *       endorse or promote products derived from this Software without specific
+ *       prior written permission.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+template<typename T>
+struct DivisionMagic {
+    T magicMultiplier;
+    unsigned shift;
+};
+
+// This contains code taken from LLVM's APInt::magic(). It's modestly adapted to our style, but
+// not completely, to make it easier to apply their changes in the future.
+template<typename T>
+DivisionMagic<T> computeDivisionMagic(T divisor)
+{
+    typedef typename std::make_unsigned<T>::type UnsignedT;
+    UnsignedT d = divisor;
+    unsigned p;
+    UnsignedT ad, anc, delta, q1, r1, q2, r2, t;
+    UnsignedT signedMin = static_cast<UnsignedT>(std::numeric_limits<T>::min());
+    DivisionMagic<T> mag;
+    unsigned bitWidth = sizeof(divisor) * 8;
+
+    // This code doesn't like to think of signedness as a type. Instead it likes to think that
+    // operations have signedness. This is how we generally do it in B3 as well. For this reason,
+    // we cast all the operated values to unsigned once, and convert back to signed later.
+    // Only `divisor` has signedness here.
+
+    ad = divisor < 0 ? -divisor : divisor; // -(signed min value) < signed max value. So there is no loss.
+    t = signedMin + (d >> (bitWidth - 1));
+    anc = t - 1 - (t % ad);   // absolute value of nc
+    p = bitWidth - 1;    // initialize p
+    q1 = signedMin / anc;   // initialize q1 = 2p/abs(nc)
+    r1 = signedMin - q1*anc;    // initialize r1 = rem(2p,abs(nc))
+    q2 = signedMin / ad;    // initialize q2 = 2p/abs(d)
+    r2 = signedMin - q2*ad;     // initialize r2 = rem(2p,abs(d))
+    do {
+        p = p + 1;
+        q1 = q1 << 1;          // update q1 = 2p/abs(nc)
+        r1 = r1 << 1;          // update r1 = rem(2p/abs(nc))
+        if (r1 >= anc) {  // must be unsigned comparison
+            q1 = q1 + 1;
+            r1 = r1 - anc;
+        }
+        q2 = q2 << 1;          // update q2 = 2p/abs(d)
+        r2 = r2 << 1;          // update r2 = rem(2p/abs(d))
+        if (r2 >= ad) {   // must be unsigned comparison
+            q2 = q2 + 1;
+            r2 = r2 - ad;
+        }
+        delta = ad - r2;
+    } while (q1 < delta || (q1 == delta && r1 == 0));
+
+    mag.magicMultiplier = q2 + 1;
+    if (divisor < 0)
+        mag.magicMultiplier = -mag.magicMultiplier;   // resulting magic number
+    mag.shift = p - bitWidth;          // resulting shift
+
+    return mag;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
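
A worked example of the magic constant in use (illustration only, not part of the patch): for a 32-bit division by 7 the routine above yields multiplier 0x92492493 and shift 2, and the strength-reduced sequence follows the Hacker's Delight recipe the header cites:

    DivisionMagic<int32_t> magic = computeDivisionMagic<int32_t>(7);
    int32_t n = 1000;
    int32_t quotient = static_cast<int32_t>((static_cast<int64_t>(magic.magicMultiplier) * n) >> 32);
    if (magic.magicMultiplier < 0)
        quotient += n;                                 // fix-up for a positive divisor whose multiplier wrapped negative
    quotient >>= magic.shift;
    quotient += static_cast<uint32_t>(quotient) >> 31; // add 1 for negative n so the result rounds toward zero
    // quotient == 142 == 1000 / 7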
diff --git a/b3/B3Const32Value.cpp b/b3/B3Const32Value.cpp
new file mode 100644
index 0000000..49a7453
--- /dev/null
+++ b/b3/B3Const32Value.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Const32Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+Const32Value::~Const32Value()
+{
+}
+
+Value* Const32Value::negConstant(Procedure& proc) const
+{
+    return proc.add<Const32Value>(origin(), -m_value);
+}
+
+Value* Const32Value::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<Const32Value>(origin(), m_value + other);
+}
+
+Value* Const32Value::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value + other->asInt32());
+}
+
+Value* Const32Value::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value - other->asInt32());
+}
+
+Value* Const32Value::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value * other->asInt32());
+}
+
+Value* Const32Value::checkAddConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    CheckedInt32 result = CheckedInt32(m_value) + CheckedInt32(other->asInt32());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkSubConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    CheckedInt32 result = CheckedInt32(m_value) - CheckedInt32(other->asInt32());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkMulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    CheckedInt32 result = CheckedInt32(m_value) * CheckedInt32(other->asInt32());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), result.unsafeGet());
+}
+
+Value* Const32Value::checkNegConstant(Procedure& proc) const
+{
+    if (m_value == -m_value)
+        return nullptr;
+    return negConstant(proc);
+}
+
+Value* Const32Value::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillDiv(m_value, other->asInt32()));
+}
+
+Value* Const32Value::uDivConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillUDiv(m_value, other->asInt32()));
+}
+
+Value* Const32Value::modConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillMod(m_value, other->asInt32()));
+}
+
+Value* Const32Value::uModConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), chillUMod(m_value, other->asInt32()));
+}
+
+Value* Const32Value::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value & other->asInt32());
+}
+
+Value* Const32Value::bitOrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value | other->asInt32());
+}
+
+Value* Const32Value::bitXorConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value ^ other->asInt32());
+}
+
+Value* Const32Value::shlConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value << (other->asInt32() & 31));
+}
+
+Value* Const32Value::sShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), m_value >> (other->asInt32() & 31));
+}
+
+Value* Const32Value::zShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), static_cast<int32_t>(static_cast<uint32_t>(m_value) >> (other->asInt32() & 31)));
+}
+
+Value* Const32Value::rotRConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), rotateRight(m_value, other->asInt32()));
+}
+
+Value* Const32Value::rotLConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const32Value>(origin(), rotateLeft(m_value, other->asInt32()));
+}
+
+Value* Const32Value::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), bitwise_cast<float>(m_value));
+}
+
+Value* Const32Value::iToDConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* Const32Value::iToFConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+TriState Const32Value::equalConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value == other->asInt32());
+}
+
+TriState Const32Value::notEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value != other->asInt32());
+}
+
+TriState Const32Value::lessThanConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value < other->asInt32());
+}
+
+TriState Const32Value::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value > other->asInt32());
+}
+
+TriState Const32Value::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value <= other->asInt32());
+}
+
+TriState Const32Value::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(m_value >= other->asInt32());
+}
+
+TriState Const32Value::aboveConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) > static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::belowConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) < static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::aboveEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) >= static_cast<uint32_t>(other->asInt32()));
+}
+
+TriState Const32Value::belowEqualConstant(const Value* other) const
+{
+    if (!other->hasInt32())
+        return MixedTriState;
+    return triState(static_cast<uint32_t>(m_value) <= static_cast<uint32_t>(other->asInt32()));
+}
+
+void Const32Value::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_value);
+}
+
+Value* Const32Value::cloneImpl() const
+{
+    return new Const32Value(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
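
A sketch (illustration only, not part of the patch) of how a simplification pass consumes these hooks; a Procedure named proc is assumed to exist:

    Value* two = proc.add<Const32Value>(Origin(), 2);
    Value* three = proc.add<Const32Value>(Origin(), 3);

    if (Value* folded = two->addConstant(proc, three)) {
        // folded is a fresh Const32Value holding 5; the pass replaces the Add with it.
    }
    // For the check* variants, nullptr also means "would overflow", so the pass must keep
    // the CheckAdd whenever checkAddConstant() declines to fold.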
diff --git a/b3/B3Const32Value.h b/b3/B3Const32Value.h
new file mode 100644
index 0000000..af4d08b
--- /dev/null
+++ b/b3/B3Const32Value.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE Const32Value : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Const32; }
+    
+    ~Const32Value();
+    
+    int32_t value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* checkAddConstant(Procedure&, const Value* other) const override;
+    Value* checkSubConstant(Procedure&, const Value* other) const override;
+    Value* checkMulConstant(Procedure&, const Value* other) const override;
+    Value* checkNegConstant(Procedure&) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* uDivConstant(Procedure&, const Value* other) const override;
+    Value* modConstant(Procedure&, const Value* other) const override;
+    Value* uModConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitOrConstant(Procedure&, const Value* other) const override;
+    Value* bitXorConstant(Procedure&, const Value* other) const override;
+    Value* shlConstant(Procedure&, const Value* other) const override;
+    Value* sShrConstant(Procedure&, const Value* other) const override;
+    Value* zShrConstant(Procedure&, const Value* other) const override;
+    Value* rotRConstant(Procedure&, const Value* other) const override;
+    Value* rotLConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* iToDConstant(Procedure&) const override;
+    Value* iToFConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+    TriState aboveConstant(const Value* other) const override;
+    TriState belowConstant(const Value* other) const override;
+    TriState aboveEqualConstant(const Value* other) const override;
+    TriState belowEqualConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+    friend class Procedure;
+
+    Const32Value(Origin origin, int32_t value)
+        : Value(CheckedOpcode, Const32, Int32, origin)
+        , m_value(value)
+    {
+    }
+
+private:
+    int32_t m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
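
accepts() above is the predicate behind checked downcasts of Value; a small sketch of the usual call pattern (illustration only; Value::as<> is assumed from B3Value.h and dataLog from WTF):

    if (Const32Value* constant = value->as<Const32Value>())
        dataLog("operand is the constant ", constant->value(), "\n");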
diff --git a/b3/B3Const64Value.cpp b/b3/B3Const64Value.cpp
new file mode 100644
index 0000000..4f7b86b
--- /dev/null
+++ b/b3/B3Const64Value.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Const64Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+Const64Value::~Const64Value()
+{
+}
+
+Value* Const64Value::negConstant(Procedure& proc) const
+{
+    return proc.add<Const64Value>(origin(), -m_value);
+}
+
+Value* Const64Value::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<Const64Value>(origin(), m_value + static_cast<int64_t>(other));
+}
+
+Value* Const64Value::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value + other->asInt64());
+}
+
+Value* Const64Value::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value - other->asInt64());
+}
+
+Value* Const64Value::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value * other->asInt64());
+}
+
+Value* Const64Value::checkAddConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    CheckedInt64 result = CheckedInt64(m_value) + CheckedInt64(other->asInt64());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkSubConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    CheckedInt64 result = CheckedInt64(m_value) - CheckedInt64(other->asInt64());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkMulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    CheckedInt64 result = CheckedInt64(m_value) * CheckedInt64(other->asInt64());
+    if (result.hasOverflowed())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), result.unsafeGet());
+}
+
+Value* Const64Value::checkNegConstant(Procedure& proc) const
+{
+    if (m_value == -m_value)
+        return nullptr;
+    return negConstant(proc);
+}
+
+Value* Const64Value::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillDiv(m_value, other->asInt64()));
+}
+
+Value* Const64Value::uDivConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillUDiv(m_value, other->asInt64()));
+}
+
+Value* Const64Value::modConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillMod(m_value, other->asInt64()));
+}
+
+Value* Const64Value::uModConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), chillUMod(m_value, other->asInt64()));
+}
+
+Value* Const64Value::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value & other->asInt64());
+}
+
+Value* Const64Value::bitOrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value | other->asInt64());
+}
+
+Value* Const64Value::bitXorConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt64())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value ^ other->asInt64());
+}
+
+Value* Const64Value::shlConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value << (other->asInt32() & 63));
+}
+
+Value* Const64Value::sShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), m_value >> (other->asInt32() & 63));
+}
+
+Value* Const64Value::zShrConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), static_cast<int64_t>(static_cast<uint64_t>(m_value) >> (other->asInt32() & 63)));
+}
+
+Value* Const64Value::rotRConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), rotateRight(m_value, other->asInt32()));
+}
+
+Value* Const64Value::rotLConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasInt32())
+        return nullptr;
+    return proc.add<Const64Value>(origin(), rotateLeft(m_value, other->asInt32()));
+}
+
+Value* Const64Value::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), bitwise_cast<double>(m_value));
+}
+
+Value* Const64Value::iToDConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* Const64Value::iToFConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+TriState Const64Value::equalConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value == other->asInt64());
+}
+
+TriState Const64Value::notEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value != other->asInt64());
+}
+
+TriState Const64Value::lessThanConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value < other->asInt64());
+}
+
+TriState Const64Value::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value > other->asInt64());
+}
+
+TriState Const64Value::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value <= other->asInt64());
+}
+
+TriState Const64Value::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(m_value >= other->asInt64());
+}
+
+TriState Const64Value::aboveConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) > static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::belowConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) < static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::aboveEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) >= static_cast<uint64_t>(other->asInt64()));
+}
+
+TriState Const64Value::belowEqualConstant(const Value* other) const
+{
+    if (!other->hasInt64())
+        return MixedTriState;
+    return triState(static_cast<uint64_t>(m_value) <= static_cast<uint64_t>(other->asInt64()));
+}
+
+void Const64Value::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_value);
+}
+
+Value* Const64Value::cloneImpl() const
+{
+    return new Const64Value(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Const64Value.h b/b3/B3Const64Value.h
new file mode 100644
index 0000000..3efd558
--- /dev/null
+++ b/b3/B3Const64Value.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE Const64Value : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Const64; }
+    
+    ~Const64Value();
+    
+    int64_t value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* checkAddConstant(Procedure&, const Value* other) const override;
+    Value* checkSubConstant(Procedure&, const Value* other) const override;
+    Value* checkMulConstant(Procedure&, const Value* other) const override;
+    Value* checkNegConstant(Procedure&) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* uDivConstant(Procedure&, const Value* other) const override;
+    Value* modConstant(Procedure&, const Value* other) const override;
+    Value* uModConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitOrConstant(Procedure&, const Value* other) const override;
+    Value* bitXorConstant(Procedure&, const Value* other) const override;
+    Value* shlConstant(Procedure&, const Value* other) const override;
+    Value* sShrConstant(Procedure&, const Value* other) const override;
+    Value* zShrConstant(Procedure&, const Value* other) const override;
+    Value* rotRConstant(Procedure&, const Value* other) const override;
+    Value* rotLConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* iToDConstant(Procedure&) const override;
+    Value* iToFConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+    TriState aboveConstant(const Value* other) const override;
+    TriState belowConstant(const Value* other) const override;
+    TriState aboveEqualConstant(const Value* other) const override;
+    TriState belowEqualConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+    friend class Procedure;
+
+    Const64Value(Origin origin, int64_t value)
+        : Value(CheckedOpcode, Const64, Int64, origin)
+        , m_value(value)
+    {
+    }
+    
+private:
+    int64_t m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ConstDoubleValue.cpp b/b3/B3ConstDoubleValue.cpp
new file mode 100644
index 0000000..46183f3
--- /dev/null
+++ b/b3/B3ConstDoubleValue.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ConstDoubleValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstFloatValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+ConstDoubleValue::~ConstDoubleValue()
+{
+}
+
+Value* ConstDoubleValue::negConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), -m_value);
+}
+
+Value* ConstDoubleValue::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<ConstDoubleValue>(origin(), m_value + static_cast<double>(other));
+}
+
+Value* ConstDoubleValue::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value + other->asDouble());
+}
+
+Value* ConstDoubleValue::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value - other->asDouble());
+}
+
+Value* ConstDoubleValue::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value * other->asDouble());
+}
+
+Value* ConstDoubleValue::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    double result = bitwise_cast<double>(bitwise_cast<uint64_t>(m_value) & bitwise_cast<uint64_t>(other->asDouble()));
+    return proc.add<ConstDoubleValue>(origin(), result);
+}
+
+Value* ConstDoubleValue::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<Const64Value>(origin(), bitwise_cast<int64_t>(m_value));
+}
+
+Value* ConstDoubleValue::doubleToFloatConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(m_value));
+}
+
+Value* ConstDoubleValue::absConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), fabs(m_value));
+}
+
+Value* ConstDoubleValue::ceilConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), ceil(m_value));
+}
+
+Value* ConstDoubleValue::floorConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), floor(m_value));
+}
+
+Value* ConstDoubleValue::sqrtConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), sqrt(m_value));
+}
+
+Value* ConstDoubleValue::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), m_value / other->asDouble());
+}
+
+Value* ConstDoubleValue::modConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasDouble())
+        return nullptr;
+    return proc.add<ConstDoubleValue>(origin(), fmod(m_value, other->asDouble()));
+}
+
+TriState ConstDoubleValue::equalConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value == other->asDouble());
+}
+
+TriState ConstDoubleValue::notEqualConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value != other->asDouble());
+}
+
+TriState ConstDoubleValue::lessThanConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value < other->asDouble());
+}
+
+TriState ConstDoubleValue::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value > other->asDouble());
+}
+
+TriState ConstDoubleValue::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value <= other->asDouble());
+}
+
+TriState ConstDoubleValue::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasDouble())
+        return MixedTriState;
+    return triState(m_value >= other->asDouble());
+}
+
+TriState ConstDoubleValue::equalOrUnorderedConstant(const Value* other) const
+{
+    if (std::isnan(m_value))
+        return TrueTriState;
+
+    if (!other->hasDouble())
+        return MixedTriState;
+    double otherValue = other->asDouble();
+    return triState(std::isunordered(m_value, otherValue) || m_value == otherValue);
+}
+
+void ConstDoubleValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma);
+    out.printf("%le", m_value);
+}
+
+Value* ConstDoubleValue::cloneImpl() const
+{
+    return new ConstDoubleValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
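
One detail of equalOrUnorderedConstant() above that is easy to miss, shown as a sketch (illustration only; proc is an existing Procedure): a NaN constant folds to true before the other operand is even inspected, because an unordered comparison is true whenever either side is NaN.

    Value* nan = proc.add<ConstDoubleValue>(Origin(), std::numeric_limits<double>::quiet_NaN());
    Value* two = proc.add<ConstDoubleValue>(Origin(), 2.0);

    nan->equalOrUnorderedConstant(two); // TrueTriState: early return on the NaN receiver
    two->equalOrUnorderedConstant(nan); // TrueTriState as well, via the isunordered() path
    nan->equalConstant(two);            // FalseTriState: ordinary Equal is false for NaN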
diff --git a/b3/B3ConstDoubleValue.h b/b3/B3ConstDoubleValue.h
new file mode 100644
index 0000000..75976f3
--- /dev/null
+++ b/b3/B3ConstDoubleValue.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ConstDoubleValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == ConstDouble; }
+    
+    ~ConstDoubleValue();
+    
+    double value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* modConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* doubleToFloatConstant(Procedure&) const override;
+    Value* absConstant(Procedure&) const override;
+    Value* ceilConstant(Procedure&) const override;
+    Value* floorConstant(Procedure&) const override;
+    Value* sqrtConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+    TriState equalOrUnorderedConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    ConstDoubleValue(Origin origin, double value)
+        : Value(CheckedOpcode, ConstDouble, Double, origin)
+        , m_value(value)
+    {
+    }
+    
+    double m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ConstFloatValue.cpp b/b3/B3ConstFloatValue.cpp
new file mode 100644
index 0000000..7c1b395
--- /dev/null
+++ b/b3/B3ConstFloatValue.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ConstFloatValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstDoubleValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+ConstFloatValue::~ConstFloatValue()
+{
+}
+
+Value* ConstFloatValue::negConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), -m_value);
+}
+
+Value* ConstFloatValue::addConstant(Procedure& proc, int32_t other) const
+{
+    return proc.add<ConstFloatValue>(origin(), m_value + static_cast<float>(other));
+}
+
+Value* ConstFloatValue::addConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value + other->asFloat());
+}
+
+Value* ConstFloatValue::subConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value - other->asFloat());
+}
+
+Value* ConstFloatValue::mulConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value * other->asFloat());
+}
+
+Value* ConstFloatValue::bitAndConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    float result = bitwise_cast<float>(bitwise_cast<int32_t>(m_value) & bitwise_cast<int32_t>(other->asFloat()));
+    return proc.add<ConstFloatValue>(origin(), result);
+}
+
+Value* ConstFloatValue::bitwiseCastConstant(Procedure& proc) const
+{
+    return proc.add<Const32Value>(origin(), bitwise_cast<int32_t>(m_value));
+}
+
+Value* ConstFloatValue::floatToDoubleConstant(Procedure& proc) const
+{
+    return proc.add<ConstDoubleValue>(origin(), static_cast<double>(m_value));
+}
+
+Value* ConstFloatValue::absConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(fabs(m_value)));
+}
+
+Value* ConstFloatValue::ceilConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), ceilf(m_value));
+}
+
+Value* ConstFloatValue::floorConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), floorf(m_value));
+}
+
+Value* ConstFloatValue::sqrtConstant(Procedure& proc) const
+{
+    return proc.add<ConstFloatValue>(origin(), static_cast<float>(sqrt(m_value)));
+}
+
+Value* ConstFloatValue::divConstant(Procedure& proc, const Value* other) const
+{
+    if (!other->hasFloat())
+        return nullptr;
+    return proc.add<ConstFloatValue>(origin(), m_value / other->asFloat());
+}
+
+TriState ConstFloatValue::equalConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value == other->asFloat());
+}
+
+TriState ConstFloatValue::notEqualConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value != other->asFloat());
+}
+
+TriState ConstFloatValue::lessThanConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value < other->asFloat());
+}
+
+TriState ConstFloatValue::greaterThanConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value > other->asFloat());
+}
+
+TriState ConstFloatValue::lessEqualConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value <= other->asFloat());
+}
+
+TriState ConstFloatValue::greaterEqualConstant(const Value* other) const
+{
+    if (!other->hasFloat())
+        return MixedTriState;
+    return triState(m_value >= other->asFloat());
+}
+
+void ConstFloatValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma);
+    out.printf("%le", m_value);
+}
+
+Value* ConstFloatValue::cloneImpl() const
+{
+    return new ConstFloatValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ConstFloatValue.h b/b3/B3ConstFloatValue.h
new file mode 100644
index 0000000..1974f8c
--- /dev/null
+++ b/b3/B3ConstFloatValue.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE ConstFloatValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == ConstFloat; }
+
+    ~ConstFloatValue();
+
+    float value() const { return m_value; }
+
+    Value* negConstant(Procedure&) const override;
+    Value* addConstant(Procedure&, int32_t other) const override;
+    Value* addConstant(Procedure&, const Value* other) const override;
+    Value* subConstant(Procedure&, const Value* other) const override;
+    Value* divConstant(Procedure&, const Value* other) const override;
+    Value* mulConstant(Procedure&, const Value* other) const override;
+    Value* bitAndConstant(Procedure&, const Value* other) const override;
+    Value* bitwiseCastConstant(Procedure&) const override;
+    Value* floatToDoubleConstant(Procedure&) const override;
+    Value* absConstant(Procedure&) const override;
+    Value* ceilConstant(Procedure&) const override;
+    Value* floorConstant(Procedure&) const override;
+    Value* sqrtConstant(Procedure&) const override;
+
+    TriState equalConstant(const Value* other) const override;
+    TriState notEqualConstant(const Value* other) const override;
+    TriState lessThanConstant(const Value* other) const override;
+    TriState greaterThanConstant(const Value* other) const override;
+    TriState lessEqualConstant(const Value* other) const override;
+    TriState greaterEqualConstant(const Value* other) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    ConstFloatValue(Origin origin, float value)
+        : Value(CheckedOpcode, ConstFloat, Float, origin)
+        , m_value(value)
+    {
+    }
+
+    float m_value;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ConstPtrValue.h b/b3/B3ConstPtrValue.h
new file mode 100644
index 0000000..78bcba3
--- /dev/null
+++ b/b3/B3ConstPtrValue.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+
+namespace JSC { namespace B3 {
+
+// Usually you want to use Const32Value or Const64Value directly. But this is useful for writing
+// platform-agnostic code. Note that a ConstPtrValue will behave like either a Const32Value or
+// Const64Value depending on platform.
+
+#if USE(JSVALUE64)
+typedef Const64Value ConstPtrValueBase;
+#else
+typedef Const32Value ConstPtrValueBase;
+#endif
+
+class ConstPtrValue : public ConstPtrValueBase {
+public:
+    void* value() const
+    {
+        return bitwise_cast<void*>(ConstPtrValueBase::value());
+    }
+
+private:
+    friend class Procedure;
+
+    template<typename T>
+    ConstPtrValue(Origin origin, T* pointer)
+        : ConstPtrValueBase(origin, bitwise_cast<intptr_t>(pointer))
+    {
+    }
+    template<typename T>
+    ConstPtrValue(Origin origin, T pointer)
+        : ConstPtrValueBase(origin, static_cast<intptr_t>(pointer))
+    {
+    }
+};
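+
+// A brief usage sketch (the names 'proc', 'origin', and 'entry' below are hypothetical):
+// pointer constants are materialized through Procedure::add, which forwards to the private
+// constructors above and picks Const32Value or Const64Value for the current platform.
+//
+//     void* entry = ...;
+//     Value* ptr = proc.add<ConstPtrValue>(origin, entry);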
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ConstrainedValue.cpp b/b3/B3ConstrainedValue.cpp
new file mode 100644
index 0000000..dd1762f
--- /dev/null
+++ b/b3/B3ConstrainedValue.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ConstrainedValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+void ConstrainedValue::dump(PrintStream& out) const
+{
+    out.print(pointerDump(m_value), ":", m_rep);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3ConstrainedValue.h b/b3/B3ConstrainedValue.h
new file mode 100644
index 0000000..d2cd31f
--- /dev/null
+++ b/b3/B3ConstrainedValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueRep.h"
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+class ConstrainedValue {
+public:
+    ConstrainedValue()
+    {
+    }
+
+    ConstrainedValue(Value* value)
+        : m_value(value)
+        , m_rep(ValueRep::WarmAny)
+    {
+    }
+
+    ConstrainedValue(Value* value, const ValueRep& rep)
+        : m_value(value)
+        , m_rep(rep)
+    {
+    }
+
+    explicit operator bool() const { return m_value || m_rep; }
+
+    Value* value() const { return m_value; }
+    const ValueRep& rep() const { return m_rep; }
+
+    void dump(PrintStream& out) const;
+
+private:
+    Value* m_value;
+    ValueRep m_rep;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3DataSection.cpp b/b3/B3DataSection.cpp
new file mode 100644
index 0000000..f4e68bc
--- /dev/null
+++ b/b3/B3DataSection.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3DataSection.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+DataSection::DataSection(size_t size)
+    : m_data(fastZeroedMalloc(size))
+    , m_size(size)
+{
+}
+
+DataSection::~DataSection()
+{
+    fastFree(m_data);
+}
+
+void DataSection::dump(PrintStream& out) const
+{
+    out.print("DataSection at ", RawPointer(m_data), " with ", m_size, " bytes.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3DataSection.h b/b3/B3DataSection.h
new file mode 100644
index 0000000..0bca40e
--- /dev/null
+++ b/b3/B3DataSection.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproduct.h"
+
+namespace JSC { namespace B3 {
+
+class DataSection : public OpaqueByproduct {
+public:
+    DataSection(size_t size);
+    virtual ~DataSection();
+
+    void* data() const { return m_data; }
+    size_t size() const { return m_size; }
+
+    void dump(PrintStream&) const override;
+
+private:
+    void* m_data;
+    size_t m_size;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Dominators.h b/b3/B3Dominators.h
new file mode 100644
index 0000000..4a9d085
--- /dev/null
+++ b/b3/B3Dominators.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CFG.h"
+#include "B3Procedure.h"
+#include <wtf/Dominators.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
+class Dominators : public WTF::Dominators<CFG> {
+    WTF_MAKE_NONCOPYABLE(Dominators);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    Dominators(Procedure& proc)
+        : WTF::Dominators<CFG>(proc.cfg())
+    {
+    }
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3DuplicateTails.cpp b/b3/B3DuplicateTails.cpp
new file mode 100644
index 0000000..fe94a60
--- /dev/null
+++ b/b3/B3DuplicateTails.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3DuplicateTails.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BreakCriticalEdges.h"
+#include "B3Dominators.h"
+#include "B3FixSSA.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class DuplicateTails {
+public:
+    DuplicateTails(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+        , m_maxSize(Options::maxB3TailDupBlockSize())
+        , m_maxSuccessors(Options::maxB3TailDupBlockSuccessors())
+    {
+    }
+
+    void run()
+    {
+        // Breaking critical edges introduces blocks that jump to things. Those Jumps' successors
+        // become candidates for tail duplication. Prior to critical edge breaking, some of those
+        // Jumps would have been Branches, and so no tail duplication would have happened.
+        breakCriticalEdges(m_proc);
+        
+        // Find blocks that would be candidates for tail duplication. They must be small enough
+        // and they must not have too many successors.
+
+        m_proc.resetValueOwners();
+
+        IndexSet<BasicBlock> candidates;
+
+        for (BasicBlock* block : m_proc) {
+            if (block->size() > m_maxSize)
+                continue;
+            if (block->numSuccessors() > m_maxSuccessors)
+                continue;
+            if (block->last()->type() != Void) // Demoting doesn't handle terminals with values.
+                continue;
+
+            candidates.add(block);
+        }
+
+        // Collect the set of values that must be de-SSA'd.
+        IndexSet<Value> valuesToDemote;
+        for (BasicBlock* block : m_proc) {
+            for (Value* value : *block) {
+                if (value->opcode() == Phi && candidates.contains(block))
+                    valuesToDemote.add(value);
+                for (Value* child : value->children()) {
+                    if (child->owner != block && candidates.contains(child->owner))
+                        valuesToDemote.add(child);
+                }
+            }
+        }
+        demoteValues(m_proc, valuesToDemote);
+        if (verbose) {
+            dataLog("Procedure after value demotion:\n");
+            dataLog(m_proc);
+        }
+
+        for (BasicBlock* block : m_proc) {
+            if (block->last()->opcode() != Jump)
+                continue;
+
+            BasicBlock* tail = block->successorBlock(0);
+            if (!candidates.contains(tail))
+                continue;
+
+            // Don't tail duplicate a trivial self-loop, because the code below can't handle block and
+            // tail being the same block.
+            if (block == tail)
+                continue;
+
+            // We're about to change 'block'. Make sure that nobody duplicates block after this
+            // point.
+            candidates.remove(block);
+
+            if (verbose)
+                dataLog("Duplicating ", *tail, " into ", *block, "\n");
+
+            block->removeLast(m_proc);
+
+            HashMap<Value*, Value*> map;
+            for (Value* value : *tail) {
+                Value* clone = m_proc.clone(value);
+                for (Value*& child : clone->children()) {
+                    if (Value* replacement = map.get(child))
+                        child = replacement;
+                }
+                if (value->type() != Void)
+                    map.add(value, clone);
+                block->append(clone);
+            }
+            block->successors() = tail->successors();
+        }
+
+        m_proc.resetReachability();
+        m_proc.invalidateCFG();
+    }
+    
+private:
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+    unsigned m_maxSize;
+    unsigned m_maxSuccessors;
+};
+
+} // anonymous namespace
+
+void duplicateTails(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "duplicateTails");
+    DuplicateTails duplicateTails(proc);
+    duplicateTails.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3DuplicateTails.h b/b3/B3DuplicateTails.h
new file mode 100644
index 0000000..443adaf
--- /dev/null
+++ b/b3/B3DuplicateTails.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Replaces jumps to tiny basic blocks with the contents of those basic blocks. Also simplifies
+// branches that are path-redundant. Does not do a fixpoint, because it does not have a good way
+// of detecting termination.
+
+void duplicateTails(Procedure&);
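+
+// A rough before/after sketch of the transformation (the block names are hypothetical): given
+//
+//     BB1: ...; Jump(BB2)
+//     BB2: a few small values; Branch(@p, BB3, BB4)
+//
+// the pass clones BB2's values into BB1 and gives BB1 BB2's successors, so BB1 ends in the
+// Branch directly. BB2 itself is left in place for any other predecessors.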
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Effects.cpp b/b3/B3Effects.cpp
new file mode 100644
index 0000000..aeda46f
--- /dev/null
+++ b/b3/B3Effects.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Effects.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+#include <wtf/DataLog.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+// These helpers cascade in such a way that after the helper for terminal, we don't have to worry
+// about terminal again, since the terminal case considers all ways that a terminal may interfere
+// with something else. And after the exit sideways case, we don't have to worry about either
+// exitsSideways or terminal. And so on...
+
+bool interferesWithTerminal(const Effects& terminal, const Effects& other)
+{
+    if (!terminal.terminal)
+        return false;
+    return other.terminal || other.controlDependent || other.writesLocalState || other.writes || other.writesPinned;
+}
+
+bool interferesWithExitSideways(const Effects& exitsSideways, const Effects& other)
+{
+    if (!exitsSideways.exitsSideways)
+        return false;
+    return other.controlDependent || other.writes || other.writesPinned;
+}
+
+bool interferesWithWritesLocalState(const Effects& writesLocalState, const Effects& other)
+{
+    if (!writesLocalState.writesLocalState)
+        return false;
+    return other.writesLocalState || other.readsLocalState;
+}
+
+bool interferesWithWritesPinned(const Effects& writesPinned, const Effects& other)
+{
+    if (!writesPinned.writesPinned)
+        return false;
+    return other.writesPinned || other.readsPinned;
+}
+
+} // anonymous namespace
+
+bool Effects::interferes(const Effects& other) const
+{
+    return interferesWithTerminal(*this, other)
+        || interferesWithTerminal(other, *this)
+        || interferesWithExitSideways(*this, other)
+        || interferesWithExitSideways(other, *this)
+        || interferesWithWritesLocalState(*this, other)
+        || interferesWithWritesLocalState(other, *this)
+        || interferesWithWritesPinned(*this, other)
+        || interferesWithWritesPinned(other, *this)
+        || writes.overlaps(other.writes)
+        || writes.overlaps(other.reads)
+        || reads.overlaps(other.writes);
+}
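+
+// For example (a hedged sketch; the ranges below are made up): a store whose writes overlap a
+// load's reads interferes with it in either order, so the two cannot be reordered.
+//
+//     Effects store;
+//     store.writes = HeapRange(0, 8);
+//     Effects load;
+//     load.reads = HeapRange(4, 12);
+//     ASSERT(store.interferes(load) && load.interferes(store));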
+
+bool Effects::operator==(const Effects& other) const
+{
+    return terminal == other.terminal
+        && exitsSideways == other.exitsSideways
+        && controlDependent == other.controlDependent
+        && writesLocalState == other.writesLocalState
+        && readsLocalState == other.readsLocalState
+        && writesPinned == other.writesPinned
+        && readsPinned == other.readsPinned
+        && writes == other.writes
+        && reads == other.reads;
+}
+
+bool Effects::operator!=(const Effects& other) const
+{
+    return !(*this == other);
+}
+
+void Effects::dump(PrintStream& out) const
+{
+    CommaPrinter comma("|");
+    if (terminal)
+        out.print(comma, "Terminal");
+    if (exitsSideways)
+        out.print(comma, "ExitsSideways");
+    if (controlDependent)
+        out.print(comma, "ControlDependent");
+    if (writesLocalState)
+        out.print(comma, "WritesLocalState");
+    if (readsLocalState)
+        out.print(comma, "ReadsLocalState");
+    if (writesPinned)
+        out.print(comma, "WritesPinned");
+    if (readsPinned)
+        out.print(comma, "ReadsPinned");
+    if (writes)
+        out.print(comma, "Writes:", writes);
+    if (reads)
+        out.print(comma, "Reads:", reads);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Effects.h b/b3/B3Effects.h
new file mode 100644
index 0000000..7a08853
--- /dev/null
+++ b/b3/B3Effects.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+struct Effects {
+    // True if this cannot continue execution in the current block.
+    bool terminal { false };
+
+    // True if this value can cause execution to terminate abruptly, and that this abrupt termination is
+    // observable. An example of how this gets used is to limit the hoisting of controlDependent values.
+    // Note that if exitsSideways is set to true but reads is bottom, then B3 is free to assume that
+    // after abrupt termination of this procedure, none of the heap will be read. That's usually false,
+    // so make sure that reads corresponds to the set of things that are readable after this function
+    // terminates abruptly.
+    bool exitsSideways { false };
+
+    // True if the instruction may change semantics if hoisted above some control flow. For example,
+    // loads are usually control-dependent because we must assume that any control construct (either
+    // a terminal like Branch or anything that exits sideways, like Check) validates whether the
+    // pointer is valid. Hoisting the load above control may cause the load to trap even though it
+    // would not have otherwise trapped.
+    bool controlDependent { false };
+
+    // True if this writes to the local state. Operations that write local state don't write to anything
+    // in "memory" but they have a side-effect anyway. This is for modeling Upsilons, Sets, and Fences.
+    // This is a way of saying: even though this operation is not a terminal, does not exit sideways,
+    // and does not write to the heap, you still cannot kill this operation.
+    bool writesLocalState { false };
+
+    // True if this reads from the local state. This is only used for Phi and Get.
+    bool readsLocalState { false };
+
+    // B3 understands things about pinned registers. Therefore, it needs to know who reads them and
+    // who writes them. We don't track this on a per-register basis because that would be harder and
+    // we don't need it. Note that if you want to construct an immutable pinned register while also
+    // having other pinned registers that are mutable, then you can use ArgumentReg. Also note that
+    // nobody will stop you from making this get out-of-sync with your clobbered register sets in
+    // Patchpoint. It's recommended that you err on the side of being conservative.
+    // FIXME: Explore making these be RegisterSets. That's mainly hard because it would be awkward to
+    // reconcile with StackmapValue's support for clobbered regs.
+    // https://bugs.webkit.org/show_bug.cgi?id=163173
+    bool readsPinned { false };
+    bool writesPinned { false };
+
+    HeapRange writes;
+    HeapRange reads;
+    
+    static Effects none()
+    {
+        return Effects();
+    }
+
+    static Effects forCall()
+    {
+        Effects result;
+        result.exitsSideways = true;
+        result.controlDependent = true;
+        result.writes = HeapRange::top();
+        result.reads = HeapRange::top();
+        result.readsPinned = true;
+        result.writesPinned = true;
+        return result;
+    }
+
+    static Effects forCheck()
+    {
+        Effects result;
+        result.exitsSideways = true;
+        // The program could read anything after exiting, and it's on us to declare this.
+        result.reads = HeapRange::top();
+        return result;
+    }
+
+    bool mustExecute() const
+    {
+        return terminal || exitsSideways || writesLocalState || writes || writesPinned;
+    }
+
+    // Returns true if reordering instructions with these respective effects would change program
+    // behavior in an observable way.
+    bool interferes(const Effects&) const;
+    
+    JS_EXPORT_PRIVATE bool operator==(const Effects&) const;
+    JS_EXPORT_PRIVATE bool operator!=(const Effects&) const;
+
+    JS_EXPORT_PRIVATE void dump(PrintStream& out) const;
+};
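+
+// A non-authoritative example of how a client might describe a plain heap load: it reads
+// some range, must not be hoisted above control flow, and has no other effects.
+//
+//     Effects loadEffects;                  // equivalent to Effects::none()
+//     loadEffects.controlDependent = true;
+//     loadEffects.reads = HeapRange::top(); // or a narrower range, if known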
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3EliminateCommonSubexpressions.cpp b/b3/B3EliminateCommonSubexpressions.cpp
new file mode 100644
index 0000000..feaacda
--- /dev/null
+++ b/b3/B3EliminateCommonSubexpressions.cpp
@@ -0,0 +1,703 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3EliminateCommonSubexpressions.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockWorklist.h"
+#include "B3Dominators.h"
+#include "B3HeapRange.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3PureCSE.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3ValueKey.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "DFGGraph.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/HashMap.h>
+#include <wtf/ListDump.h>
+#include <wtf/RangeSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+// FIXME: We could treat Patchpoints with a non-empty set of reads as a "memory value" and somehow
+// eliminate redundant ones. We would need some way of determining if two patchpoints are replaceable.
+// It doesn't seem right to use the reads set for this. We could use the generator, but that feels
+// lame because the FTL will pretty much use a unique generator for each patchpoint even when two
+// patchpoints have the same semantics as far as CSE would be concerned. We could invent something
+// like a "value ID" for patchpoints. By default, each one gets a unique value ID, but FTL could force
+// some patchpoints to share the same one as a signal that they will return the same value if executed
+// in the same heap with the same inputs.
+
+typedef Vector<MemoryValue*> MemoryMatches;
+
+class MemoryValueMap {
+public:
+    MemoryValueMap() { }
+
+    void add(MemoryValue* memory)
+    {
+        Matches& matches = m_map.add(memory->lastChild(), Matches()).iterator->value;
+        if (matches.contains(memory))
+            return;
+        matches.append(memory);
+    }
+
+    template<typename Functor>
+    void removeIf(const Functor& functor)
+    {
+        m_map.removeIf(
+            [&] (HashMap<Value*, Matches>::KeyValuePairType& entry) -> bool {
+                entry.value.removeAllMatching(
+                    [&] (Value* value) -> bool {
+                        if (MemoryValue* memory = value->as<MemoryValue>())
+                            return functor(memory);
+                        return true;
+                    });
+                return entry.value.isEmpty();
+            });
+    }
+
+    Matches* find(Value* ptr)
+    {
+        auto iter = m_map.find(ptr);
+        if (iter == m_map.end())
+            return nullptr;
+        return &iter->value;
+    }
+
+    template<typename Functor>
+    MemoryValue* find(Value* ptr, const Functor& functor)
+    {
+        if (Matches* matches = find(ptr)) {
+            for (Value* candidateValue : *matches) {
+                if (MemoryValue* candidateMemory = candidateValue->as<MemoryValue>()) {
+                    if (functor(candidateMemory))
+                        return candidateMemory;
+                }
+            }
+        }
+        return nullptr;
+    }
+
+    void dump(PrintStream& out) const
+    {
+        out.print("{");
+        CommaPrinter comma;
+        for (auto& entry : m_map)
+            out.print(comma, pointerDump(entry.key), "=>", pointerListDump(entry.value));
+        out.print("}");
+    }
+    
+private:
+    // This uses Matches for two reasons:
+    // - It cannot be a MemoryValue* because the key is imprecise. Many MemoryValues could have the
+    //   same key while being unaliased.
+    // - It can't be a MemoryMatches array because the MemoryValue*'s could be turned into Identity's.
+    HashMap<Value*, Matches> m_map;
+};
+
+struct ImpureBlockData {
+    void dump(PrintStream& out) const
+    {
+        out.print(
+            "{reads = ", reads, ", writes = ", writes, ", storesAtHead = ", storesAtHead,
+            ", memoryValuesAtTail = ", memoryValuesAtTail, "}");
+    }
+
+    RangeSet<HeapRange> reads; // This only gets used for forward store elimination.
+    RangeSet<HeapRange> writes; // This gets used for both load and store elimination.
+
+    MemoryValueMap storesAtHead;
+    MemoryValueMap memoryValuesAtTail;
+};
+
+class CSE {
+public:
+    CSE(Procedure& proc)
+        : m_proc(proc)
+        , m_dominators(proc.dominators())
+        , m_impureBlockData(proc.size())
+        , m_insertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        if (verbose)
+            dataLog("B3 before CSE:\n", m_proc);
+        
+        m_proc.resetValueOwners();
+
+        // Summarize the impure effects of each block, and the impure values available at the end of
+        // each block. This doesn't edit code yet.
+        for (BasicBlock* block : m_proc) {
+            ImpureBlockData& data = m_impureBlockData[block];
+            for (Value* value : *block) {
+                Effects effects = value->effects();
+                MemoryValue* memory = value->as<MemoryValue>();
+                
+                if (memory && memory->isStore()
+                    && !data.reads.overlaps(memory->range())
+                    && !data.writes.overlaps(memory->range()))
+                    data.storesAtHead.add(memory);
+                data.reads.add(effects.reads);
+
+                if (HeapRange writes = effects.writes)
+                    clobber(data, writes);
+
+                if (memory)
+                    data.memoryValuesAtTail.add(memory);
+            }
+
+            if (verbose)
+                dataLog("Block ", *block, ": ", data, "\n");
+        }
+
+        // Perform CSE. This edits code.
+        Vector<BasicBlock*> postOrder = m_proc.blocksInPostOrder();
+        for (unsigned i = postOrder.size(); i--;) {
+            m_block = postOrder[i];
+            if (verbose)
+                dataLog("Looking at ", *m_block, ":\n");
+
+            m_data = ImpureBlockData();
+            for (m_index = 0; m_index < m_block->size(); ++m_index) {
+                m_value = m_block->at(m_index);
+                process();
+            }
+            m_insertionSet.execute(m_block);
+            m_impureBlockData[m_block] = m_data;
+        }
+
+        // The previous pass might have requested that we insert code in some basic block other than
+        // the one that it was looking at. This inserts them.
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                auto iter = m_sets.find(block->at(valueIndex));
+                if (iter == m_sets.end())
+                    continue;
+
+                for (Value* value : iter->value)
+                    m_insertionSet.insertValue(valueIndex + 1, value);
+            }
+            m_insertionSet.execute(block);
+        }
+
+        if (verbose)
+            dataLog("B3 after CSE:\n", m_proc);
+
+        return m_changed;
+    }
+    
+private:
+    void process()
+    {
+        m_value->performSubstitution();
+
+        if (m_pureCSE.process(m_value, m_dominators)) {
+            ASSERT(!m_value->effects().writes);
+            m_changed = true;
+            return;
+        }
+
+        MemoryValue* memory = m_value->as<MemoryValue>();
+        if (memory && processMemoryBeforeClobber(memory))
+            return;
+
+        if (HeapRange writes = m_value->effects().writes)
+            clobber(m_data, writes);
+        
+        if (memory)
+            processMemoryAfterClobber(memory);
+    }
+
+    // Return true if we got rid of the operation. If you changed IR in this function, you have to
+    // set m_changed even if you also return true.
+    bool processMemoryBeforeClobber(MemoryValue* memory)
+    {
+        Value* value = memory->child(0);
+        Value* ptr = memory->lastChild();
+        HeapRange range = memory->range();
+        int32_t offset = memory->offset();
+
+        switch (memory->opcode()) {
+        case Store8:
+            return handleStoreBeforeClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && ((candidate->opcode() == Store8 && candidate->child(0) == value)
+                            || ((candidate->opcode() == Load8Z || candidate->opcode() == Load8S)
+                                && candidate == value));
+                });
+        case Store16:
+            return handleStoreBeforeClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && ((candidate->opcode() == Store16 && candidate->child(0) == value)
+                            || ((candidate->opcode() == Load16Z || candidate->opcode() == Load16S)
+                                && candidate == value));
+                });
+        case Store:
+            return handleStoreBeforeClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && ((candidate->opcode() == Store && candidate->child(0) == value)
+                            || (candidate->opcode() == Load && candidate == value));
+                });
+        default:
+            return false;
+        }
+    }
+
+    void clobber(ImpureBlockData& data, HeapRange writes)
+    {
+        data.writes.add(writes);
+        
+        data.memoryValuesAtTail.removeIf(
+            [&] (MemoryValue* memory) {
+                return memory->range().overlaps(writes);
+            });
+    }
+
+    void processMemoryAfterClobber(MemoryValue* memory)
+    {
+        Value* ptr = memory->lastChild();
+        HeapRange range = memory->range();
+        int32_t offset = memory->offset();
+        Type type = memory->type();
+
+        // FIXME: Empower this to insert more casts and shifts. For example, a Load8 could match a
+        // Store and mask the result. You could even have:
+        //
+        // Store(@value, @ptr, offset = 0)
+        // Load8Z(@ptr, offset = 2)
+        //
+        // Which could be turned into something like this:
+        //
+        // Store(@value, @ptr, offset = 0)
+        // ZShr(@value, 16)
+        
+        switch (memory->opcode()) {
+        case Load8Z: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load8Z || candidate->opcode() == Store8);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store8) {
+                        Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xff);
+                        fixups.append(mask);
+                        Value* zext = m_proc.add<Value>(
+                            BitAnd, m_value->origin(), match->child(0), mask);
+                        fixups.append(zext);
+                        return zext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load8S: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load8S || candidate->opcode() == Store8);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store8) {
+                        Value* sext = m_proc.add<Value>(
+                            SExt8, m_value->origin(), match->child(0));
+                        fixups.append(sext);
+                        return sext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load16Z: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load16Z || candidate->opcode() == Store16);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store16) {
+                        Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xffff);
+                        fixups.append(mask);
+                        Value* zext = m_proc.add<Value>(
+                            BitAnd, m_value->origin(), match->child(0), mask);
+                        fixups.append(zext);
+                        return zext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load16S: {
+            handleMemoryValue(
+                ptr, range, [&] (MemoryValue* candidate) -> bool {
+                    return candidate->offset() == offset
+                        && (candidate->opcode() == Load16S || candidate->opcode() == Store16);
+                },
+                [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* {
+                    if (match->opcode() == Store16) {
+                        Value* sext = m_proc.add<Value>(
+                            SExt16, m_value->origin(), match->child(0));
+                        fixups.append(sext);
+                        return sext;
+                    }
+                    return nullptr;
+                });
+            break;
+        }
+
+        case Load: {
+            handleMemoryValue(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    if (candidate->offset() != offset)
+                        return false;
+
+                    if (candidate->opcode() == Load && candidate->type() == type)
+                        return true;
+
+                    if (candidate->opcode() == Store && candidate->child(0)->type() == type)
+                        return true;
+
+                    return false;
+                });
+            break;
+        }
+
+        case Store8: {
+            handleStoreAfterClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->opcode() == Store8
+                        && candidate->offset() == offset;
+                });
+            break;
+        }
+            
+        case Store16: {
+            handleStoreAfterClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->opcode() == Store16
+                        && candidate->offset() == offset;
+                });
+            break;
+        }
+            
+        case Store: {
+            handleStoreAfterClobber(
+                ptr, range,
+                [&] (MemoryValue* candidate) -> bool {
+                    return candidate->opcode() == Store
+                        && candidate->offset() == offset;
+                });
+            break;
+        }
+
+        default:
+            dataLog("Bad memory value: ", deepDump(m_proc, m_value), "\n");
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+
+    template<typename Filter>
+    bool handleStoreBeforeClobber(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        MemoryMatches matches = findMemoryValue(ptr, range, filter);
+        if (matches.isEmpty())
+            return false;
+
+        m_value->replaceWithNop();
+        m_changed = true;
+        return true;
+    }
+
+    template<typename Filter>
+    void handleStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        if (!m_value->traps() && findStoreAfterClobber(ptr, range, filter)) {
+            m_value->replaceWithNop();
+            m_changed = true;
+            return;
+        }
+
+        m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>());
+    }
+
+    template<typename Filter>
+    bool findStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        // We can eliminate a store if every forward path hits a store to the same location before
+        // hitting any operation that observes the store. This search seems like it should be
+        // expensive, but in the overwhelming majority of cases it will almost immediately hit an 
+        // operation that interferes.
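+        //
+        // For example (illustrative IR): in
+        //     Store(@a, @p, offset = 8)
+        //     @b = Add(@a, $1)
+        //     Store(@b, @p, offset = 8)
+        // the first store is dead: the only operation between the two stores neither reads
+        // nor writes the store's range, and the second store clobbers the same location.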
+
+        if (verbose)
+            dataLog(*m_value, ": looking forward for stores to ", *ptr, "...\n");
+
+        // First search forward in this basic block.
+        // FIXME: It would be cool to get rid of this linear search. It's not super critical since
+        // we will probably bail out very quickly, but it *is* annoying.
+        for (unsigned index = m_index + 1; index < m_block->size(); ++index) {
+            Value* value = m_block->at(index);
+
+            if (MemoryValue* memoryValue = value->as<MemoryValue>()) {
+                if (memoryValue->lastChild() == ptr && filter(memoryValue))
+                    return true;
+            }
+
+            Effects effects = value->effects();
+            if (effects.reads.overlaps(range) || effects.writes.overlaps(range))
+                return false;
+        }
+
+        if (!m_block->numSuccessors())
+            return false;
+
+        BlockWorklist worklist;
+        worklist.pushAll(m_block->successorBlocks());
+
+        while (BasicBlock* block = worklist.pop()) {
+            ImpureBlockData& data = m_impureBlockData[block];
+
+            MemoryValue* match = data.storesAtHead.find(ptr, filter);
+            if (match && match != m_value)
+                continue;
+
+            if (data.writes.overlaps(range) || data.reads.overlaps(range))
+                return false;
+
+            if (!block->numSuccessors())
+                return false;
+
+            worklist.pushAll(block->successorBlocks());
+        }
+
+        return true;
+    }
+
+    template<typename Filter>
+    void handleMemoryValue(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        handleMemoryValue(
+            ptr, range, filter,
+            [] (MemoryValue*, Vector<Value*>&) -> Value* {
+                return nullptr;
+            });
+    }
+
+    template<typename Filter, typename Replace>
+    void handleMemoryValue(
+        Value* ptr, HeapRange range, const Filter& filter, const Replace& replace)
+    {
+        MemoryMatches matches = findMemoryValue(ptr, range, filter);
+        if (replaceMemoryValue(matches, replace))
+            return;
+        m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>());
+    }
+
+    template<typename Replace>
+    bool replaceMemoryValue(const MemoryMatches& matches, const Replace& replace)
+    {
+        if (matches.isEmpty())
+            return false;
+
+        if (verbose)
+            dataLog("Eliminating ", *m_value, " due to ", pointerListDump(matches), "\n");
+        
+        m_changed = true;
+
+        if (matches.size() == 1) {
+            MemoryValue* dominatingMatch = matches[0];
+            RELEASE_ASSERT(m_dominators.dominates(dominatingMatch->owner, m_block));
+            
+            if (verbose)
+                dataLog("    Eliminating using ", *dominatingMatch, "\n");
+            Vector<Value*> extraValues;
+            if (Value* value = replace(dominatingMatch, extraValues)) {
+                for (Value* extraValue : extraValues)
+                    m_insertionSet.insertValue(m_index, extraValue);
+                m_value->replaceWithIdentity(value);
+            } else {
+                if (dominatingMatch->isStore())
+                    m_value->replaceWithIdentity(dominatingMatch->child(0));
+                else
+                    m_value->replaceWithIdentity(dominatingMatch);
+            }
+            return true;
+        }
+
+        // FIXME: It would be way better if this phase just did SSA calculation directly.
+        // Right now we're relying on the fact that CSE's position in the phase order is
+        // almost right before SSA fixup.
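+        //
+        // Sketch of the mechanism below (illustrative IR): the redundant value here becomes
+        // Get(@var), and a Set(@var, ...) is recorded for each match so that, once those Sets
+        // are inserted after their matches, a Load available in two predecessors looks like
+        //     pred1: @load1 = Load(@p); Set(@var, @load1)
+        //     pred2: @load2 = Load(@p); Set(@var, @load2)
+        //     here:  @x = Get(@var)
+        // and fixSSA() later rewrites the Variable back into Phi/Upsilon form.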
+
+        Variable* variable = m_proc.addVariable(m_value->type());
+
+        VariableValue* get = m_insertionSet.insert<VariableValue>(
+            m_index, Get, m_value->origin(), variable);
+        if (verbose)
+            dataLog("    Inserting get of value: ", *get, "\n");
+        m_value->replaceWithIdentity(get);
+            
+        for (MemoryValue* match : matches) {
+            Vector<Value*>& sets = m_sets.add(match, Vector<Value*>()).iterator->value;
+
+            Value* value = replace(match, sets);
+            if (!value) {
+                if (match->isStore())
+                    value = match->child(0);
+                else
+                    value = match;
+            }
+                
+            Value* set = m_proc.add<VariableValue>(Set, m_value->origin(), variable, value);
+            sets.append(set);
+        }
+
+        return true;
+    }
+
+    template<typename Filter>
+    MemoryMatches findMemoryValue(Value* ptr, HeapRange range, const Filter& filter)
+    {
+        if (verbose)
+            dataLog(*m_value, ": looking backward for ", *ptr, "...\n");
+        
+        if (MemoryValue* match = m_data.memoryValuesAtTail.find(ptr, filter)) {
+            if (verbose)
+                dataLog("    Found ", *match, " locally.\n");
+            return { match };
+        }
+
+        if (m_data.writes.overlaps(range)) {
+            if (verbose)
+                dataLog("    Giving up because of writes.\n");
+            return { };
+        }
+
+        BlockWorklist worklist;
+        worklist.pushAll(m_block->predecessors());
+
+        MemoryMatches matches;
+
+        while (BasicBlock* block = worklist.pop()) {
+            if (verbose)
+                dataLog("    Looking at ", *block, "\n");
+
+            ImpureBlockData& data = m_impureBlockData[block];
+
+            MemoryValue* match = data.memoryValuesAtTail.find(ptr, filter);
+            if (match && match != m_value) {
+                if (verbose)
+                    dataLog("    Found match: ", *match, "\n");
+                matches.append(match);
+                continue;
+            }
+
+            if (data.writes.overlaps(range)) {
+                if (verbose)
+                    dataLog("    Giving up because of writes.\n");
+                return { };
+            }
+
+            if (!block->numPredecessors()) {
+                if (verbose)
+                    dataLog("    Giving up because it's live at root.\n");
+                // This essentially proves that this is live at the prologue. That means that we
+                // cannot reliably optimize this case.
+                return { };
+            }
+            
+            worklist.pushAll(block->predecessors());
+        }
+
+        if (verbose)
+            dataLog("    Got matches: ", pointerListDump(matches), "\n");
+        return matches;
+    }
+
+    Procedure& m_proc;
+
+    Dominators& m_dominators;
+    PureCSE m_pureCSE;
+    
+    IndexMap<BasicBlock, ImpureBlockData> m_impureBlockData;
+
+    ImpureBlockData m_data;
+
+    BasicBlock* m_block;
+    unsigned m_index;
+    Value* m_value;
+
+    HashMap<Value*, Vector<Value*>> m_sets;
+
+    InsertionSet m_insertionSet;
+
+    bool m_changed { false };
+};
+
+} // anonymous namespace
+
+bool eliminateCommonSubexpressions(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "eliminateCommonSubexpressions");
+
+    CSE cse(proc);
+    return cse.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3EliminateCommonSubexpressions.h b/b3/B3EliminateCommonSubexpressions.h
new file mode 100644
index 0000000..ce994be
--- /dev/null
+++ b/b3/B3EliminateCommonSubexpressions.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// This does global common subexpression elimination (CSE) over both pure values and memory accesses.
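+//
+// For example (illustrative IR, not actual dump output): two loads from the same address
+// with no intervening write to the relevant heap range,
+//     @x = Load(@p, offset = 0)
+//     ...
+//     @y = Load(@p, offset = 0)
+// are collapsed so that @y becomes an Identity of @x; a load can likewise be forwarded
+// from a dominating store to the same location.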
+
+bool eliminateCommonSubexpressions(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3FenceValue.cpp b/b3/B3FenceValue.cpp
new file mode 100644
index 0000000..80e2792
--- /dev/null
+++ b/b3/B3FenceValue.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3FenceValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+FenceValue::~FenceValue()
+{
+}
+
+Value* FenceValue::cloneImpl() const
+{
+    return new FenceValue(*this);
+}
+
+FenceValue::FenceValue(Origin origin, HeapRange read, HeapRange write)
+    : Value(CheckedOpcode, Fence, Void, origin)
+    , read(read)
+    , write(write)
+{
+}
+
+FenceValue::FenceValue(Origin origin)
+    : FenceValue(origin, HeapRange::top(), HeapRange::top())
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3FenceValue.h b/b3/B3FenceValue.h
new file mode 100644
index 0000000..d147052
--- /dev/null
+++ b/b3/B3FenceValue.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE FenceValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Fence; }
+    
+    ~FenceValue();
+    
+    // The read/write heaps are reflected in the effects() of this value. The compiler may change
+    // the lowering of a Fence based on the heaps. For example, if a fence does not write anything
+    // then it is understood to be a store-store fence. On x86, this may lead us to not emit any
+    // code, while on ARM we may emit a cheaper fence (dmb ishst instead of dmb ish). We will do
+    // the same optimization for load-load fences, which are expressed as a Fence that writes but
+    // does not read.
+    //
+    // This abstraction allows us to cover all of the fences on x86 and all of the standalone fences
+    // on ARM. X86 really just has one fence: mfence. This fence should be used to protect stores
+    // from being sunk below loads. WTF calls it the storeLoadFence. A classic example is the Steele
+    // barrier:
+    //
+    //     o.f = v  =>  o.f = v
+    //                  if (color(o) == black)
+    //                      log(o)
+    //
+    // We are trying to ensure that if the store to o.f occurs after the collector has started
+    // visiting o, then we will log o. Under sequential consistency, this would work. The collector
+    // would set color(o) to black just before it started visiting. But x86's illusion of sequential
+    // consistency is broken in exactly just this store->load ordering case. The store to o.f may
+    // get buffered, and it may occur some time after we have loaded and checked color(o). As well,
+    // the collector's store to set color(o) to black may get buffered and it may occur some time
+    // after the collector has finished visiting o. Therefore, we need mfences. In B3 we model this
+    // as a Fence that reads and writes some heaps. Setting writes to the empty set will cause B3 to
+    // not emit any barrier on x86.
+    //
+    // On ARM there are many more fences. The Fence instruction is meant to model just two of them:
+    // dmb ish and dmb ishst. You can emit a dmb ishst by using a Fence with an empty write heap.
+    // Otherwise, you will get a dmb ish.
+    // FIXME: Add fenced memory accesses. https://bugs.webkit.org/show_bug.cgi?id=162349
+    // FIXME: Add a Depend operation. https://bugs.webkit.org/show_bug.cgi?id=162350
+    HeapRange read { HeapRange::top() };
+    HeapRange write { HeapRange::top() };
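+
+    // A minimal usage sketch (assuming the usual Procedure::add<T>() construction):
+    //     FenceValue* fence = proc.add<FenceValue>(origin); // full fence by default
+    //     fence->write = HeapRange(); // empty write set: store-store fence
+    //                                 // (no code on x86, dmb ishst on ARM)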
+
+protected:
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+    
+    JS_EXPORT_PRIVATE FenceValue(Origin origin, HeapRange read, HeapRange write);
+    
+    JS_EXPORT_PRIVATE FenceValue(Origin origin);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3FixSSA.cpp b/b3/B3FixSSA.cpp
new file mode 100644
index 0000000..730c2c8
--- /dev/null
+++ b/b3/B3FixSSA.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3FixSSA.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BreakCriticalEdges.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SSACalculator.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+namespace {
+const bool verbose = false;
+} // anonymous namespace
+
+void demoteValues(Procedure& proc, const IndexSet<Value>& values)
+{
+    HashMap<Value*, Variable*> map;
+    HashMap<Value*, Variable*> phiMap;
+
+    // Create stack slots.
+    for (Value* value : values.values(proc.values())) {
+        map.add(value, proc.addVariable(value->type()));
+
+        if (value->opcode() == Phi)
+            phiMap.add(value, proc.addVariable(value->type()));
+    }
+
+    if (verbose) {
+        dataLog("Demoting values as follows:\n");
+        dataLog("   map = ");
+        CommaPrinter comma;
+        for (auto& entry : map)
+            dataLog(comma, *entry.key, "=>", *entry.value);
+        dataLog("\n");
+        dataLog("   phiMap = ");
+        comma = CommaPrinter();
+        for (auto& entry : phiMap)
+            dataLog(comma, *entry.key, "=>", *entry.value);
+        dataLog("\n");
+    }
+
+    // Change accesses to the values to accesses to the stack slots.
+    InsertionSet insertionSet(proc);
+    for (BasicBlock* block : proc) {
+        for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+            Value* value = block->at(valueIndex);
+
+            if (value->opcode() == Phi) {
+                if (Variable* variable = phiMap.get(value)) {
+                    value->replaceWithIdentity(
+                        insertionSet.insert<VariableValue>(
+                            valueIndex, Get, value->origin(), variable));
+                }
+            } else {
+                for (Value*& child : value->children()) {
+                    if (Variable* variable = map.get(child)) {
+                        child = insertionSet.insert<VariableValue>(
+                            valueIndex, Get, value->origin(), variable);
+                    }
+                }
+
+                if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+                    if (Variable* variable = phiMap.get(upsilon->phi())) {
+                        insertionSet.insert<VariableValue>(
+                            valueIndex, Set, upsilon->origin(), variable, upsilon->child(0));
+                        value->replaceWithNop();
+                    }
+                }
+            }
+
+            if (Variable* variable = map.get(value)) {
+                insertionSet.insert<VariableValue>(
+                    valueIndex + 1, Set, value->origin(), variable, value);
+            }
+        }
+        insertionSet.execute(block);
+    }
+}
+
+bool fixSSA(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "fixSSA");
+
+    // Just for sanity, remove any unused variables first. It's unlikely that this code has any
+    // bugs having to do with dead variables, but it would be silly to have to fix such a bug if
+    // it did arise.
+    IndexSet<Variable> liveVariables;
+    for (Value* value : proc.values()) {
+        if (VariableValue* variableValue = value->as<VariableValue>())
+            liveVariables.add(variableValue->variable());
+    }
+
+    for (Variable* variable : proc.variables()) {
+        if (!liveVariables.contains(variable))
+            proc.deleteVariable(variable);
+    }
+
+    if (proc.variables().isEmpty())
+        return false;
+
+    // We know that we have variables to optimize, so do that now.
+    breakCriticalEdges(proc);
+
+    SSACalculator ssa(proc);
+
+    // Create a SSACalculator::Variable ("calcVar") for every variable.
+    Vector<Variable*> calcVarToVariable;
+    IndexMap<Variable, SSACalculator::Variable*> variableToCalcVar(proc.variables().size());
+
+    for (Variable* variable : proc.variables()) {
+        SSACalculator::Variable* calcVar = ssa.newVariable();
+        RELEASE_ASSERT(calcVar->index() == calcVarToVariable.size());
+        calcVarToVariable.append(variable);
+        variableToCalcVar[variable] = calcVar;
+    }
+
+    // Create Defs for all of the stores to the stack variable.
+    for (BasicBlock* block : proc) {
+        for (Value* value : *block) {
+            if (value->opcode() != Set)
+                continue;
+
+            Variable* variable = value->as<VariableValue>()->variable();
+
+            if (SSACalculator::Variable* calcVar = variableToCalcVar[variable])
+                ssa.newDef(calcVar, block, value->child(0));
+        }
+    }
+
+    // Decide where Phis are to be inserted. This creates them but does not insert them.
+    ssa.computePhis(
+        [&] (SSACalculator::Variable* calcVar, BasicBlock* block) -> Value* {
+            Variable* variable = calcVarToVariable[calcVar->index()];
+            Value* phi = proc.add<Value>(Phi, variable->type(), block->at(0)->origin());
+            if (verbose) {
+                dataLog(
+                    "Adding Phi for ", pointerDump(variable), " at ", *block, ": ",
+                    deepDump(proc, phi), "\n");
+            }
+            return phi;
+        });
+
+    // Now perform the conversion.
+    InsertionSet insertionSet(proc);
+    IndexMap<Variable, Value*> mapping(proc.variables().size());
+    for (BasicBlock* block : proc.blocksInPreOrder()) {
+        mapping.clear();
+
+        for (unsigned index = calcVarToVariable.size(); index--;) {
+            Variable* variable = calcVarToVariable[index];
+            SSACalculator::Variable* calcVar = ssa.variable(index);
+
+            SSACalculator::Def* def = ssa.reachingDefAtHead(block, calcVar);
+            if (def)
+                mapping[variable] = def->value();
+        }
+
+        for (SSACalculator::Def* phiDef : ssa.phisForBlock(block)) {
+            Variable* variable = calcVarToVariable[phiDef->variable()->index()];
+
+            insertionSet.insertValue(0, phiDef->value());
+            mapping[variable] = phiDef->value();
+        }
+
+        for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+            Value* value = block->at(valueIndex);
+            value->performSubstitution();
+
+            switch (value->opcode()) {
+            case Get: {
+                VariableValue* variableValue = value->as<VariableValue>();
+                Variable* variable = variableValue->variable();
+
+                if (Value* replacement = mapping[variable])
+                    value->replaceWithIdentity(replacement);
+                else {
+                    value->replaceWithIdentity(
+                        insertionSet.insertBottom(valueIndex, value));
+                }
+                break;
+            }
+                
+            case Set: {
+                VariableValue* variableValue = value->as<VariableValue>();
+                Variable* variable = variableValue->variable();
+
+                mapping[variable] = value->child(0);
+                value->replaceWithNop();
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+
+        unsigned upsilonInsertionPoint = block->size() - 1;
+        Origin upsilonOrigin = block->last()->origin();
+        for (BasicBlock* successorBlock : block->successorBlocks()) {
+            for (SSACalculator::Def* phiDef : ssa.phisForBlock(successorBlock)) {
+                Value* phi = phiDef->value();
+                SSACalculator::Variable* calcVar = phiDef->variable();
+                Variable* variable = calcVarToVariable[calcVar->index()];
+
+                Value* mappedValue = mapping[variable];
+                if (verbose) {
+                    dataLog(
+                        "Mapped value for ", *variable, " with successor Phi ", *phi,
+                        " at end of ", *block, ": ", pointerDump(mappedValue), "\n");
+                }
+                
+                if (!mappedValue)
+                    mappedValue = insertionSet.insertBottom(upsilonInsertionPoint, phi);
+                
+                insertionSet.insert<UpsilonValue>(
+                    upsilonInsertionPoint, upsilonOrigin, mappedValue, phi);
+            }
+        }
+
+        insertionSet.execute(block);
+    }
+
+    if (verbose) {
+        dataLog("B3 after SSA conversion:\n");
+        dataLog(proc);
+    }
+
+    return true;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3FixSSA.h b/b3/B3FixSSA.h
new file mode 100644
index 0000000..775c322
--- /dev/null
+++ b/b3/B3FixSSA.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Turns all mentions of the given values into accesses to variables. This is meant to be used
+// from phases that don't like SSA for whatever reason.
+void demoteValues(Procedure&, const IndexSet<Value>&);
+
+// This fixes SSA for you. Use this after you have done demoteValues() and you have performed
+// whatever evil transformation you needed.
+bool fixSSA(Procedure&);
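+
+// A typical pattern, as a sketch (the names below are illustrative, not part of the API):
+//     IndexSet<Value> valuesToDemote;
+//     ... add the values whose SSA form gets in the way ...
+//     demoteValues(proc, valuesToDemote);
+//     ... perform the transformation ...
+//     fixSSA(proc);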
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3FoldPathConstants.cpp b/b3/B3FoldPathConstants.cpp
new file mode 100644
index 0000000..24a0134
--- /dev/null
+++ b/b3/B3FoldPathConstants.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3FoldPathConstants.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class FoldPathConstants {
+public:
+    FoldPathConstants(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    void run()
+    {
+        bool changed = false;
+
+        if (verbose)
+            dataLog("B3 before folding path constants: \n", m_proc, "\n");
+        
+        // Find all of the values that are the subject of a branch or switch. For any successor
+        // that we dominate, install a value override at that block.
+
+        HashMap<Value*, Vector<Override>> overrides;
+
+        Dominators& dominators = m_proc.dominators();
+        
+        auto addOverride = [&] (
+            BasicBlock* from, Value* value, const Override& override) {
+
+            if (override.block->numPredecessors() != 1)
+                return;
+            ASSERT(override.block->predecessor(0) == from);
+
+            Vector<Override>& forValue =
+                overrides.add(value, Vector<Override>()).iterator->value;
+
+            if (!ASSERT_DISABLED) {
+                for (const Override& otherOverride : forValue)
+                    ASSERT_UNUSED(otherOverride, otherOverride.block != override.block);
+            }
+
+            if (verbose)
+                dataLog("Overriding ", *value, " from ", *from, ": ", override, "\n");
+            
+            forValue.append(override);
+        };
+        
+        for (BasicBlock* block : m_proc) {
+            Value* branch = block->last();
+            switch (branch->opcode()) {
+            case Branch:
+                if (block->successorBlock(0) == block->successorBlock(1))
+                    continue;
+                addOverride(
+                    block, branch->child(0),
+                    Override::nonZero(block->successorBlock(0)));
+                addOverride(
+                    block, branch->child(0),
+                    Override::constant(block->successorBlock(1), 0));
+                break;
+            case Switch: {
+                HashMap<BasicBlock*, unsigned> targetUses;
+                for (const SwitchCase& switchCase : branch->as<SwitchValue>()->cases(block))
+                    targetUses.add(switchCase.targetBlock(), 0).iterator->value++;
+
+                for (const SwitchCase& switchCase : branch->as<SwitchValue>()->cases(block)) {
+                    if (targetUses.find(switchCase.targetBlock())->value != 1)
+                        continue;
+
+                    addOverride(
+                        block, branch->child(0),
+                        Override::constant(switchCase.targetBlock(), switchCase.caseValue()));
+                }
+                break;
+            }
+            default:
+                break;
+            }
+        }
+
+        // Install the constants in the override blocks. We use one-shot insertion sets because
+        // each block will get at most one thing inserted into it anyway.
+        for (auto& entry : overrides) {
+            for (Override& override : entry.value) {
+                if (!override.hasValue)
+                    continue;
+                override.valueNode =
+                    m_insertionSet.insertIntConstant(0, entry.key, override.value);
+                m_insertionSet.execute(override.block);
+            }
+        }
+
+        // Replace all uses of a value that has an override with that override, if appropriate.
+        // Certain instructions get special treatment.
+        auto getOverride = [&] (BasicBlock* block, Value* value) -> Override {
+            auto iter = overrides.find(value);
+            if (iter == overrides.end())
+                return Override();
+
+            Vector<Override>& forValue = iter->value;
+            Override result;
+            for (Override& override : forValue) {
+                if (dominators.dominates(override.block, block)
+                    && override.isBetterThan(result))
+                    result = override;
+            }
+
+            if (verbose)
+                dataLog("In block ", *block, " getting override for ", *value, ": ", result, "\n");
+
+            return result;
+        };
+        
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                Value* value = block->at(valueIndex);
+
+                switch (value->opcode()) {
+                case Branch: {
+                    if (getOverride(block, value->child(0)).isNonZero) {
+                        value->replaceWithJump(block, block->taken());
+                        changed = true;
+                    }
+                    break;
+                }
+
+                case Equal: {
+                    if (value->child(1)->isInt(0)
+                        && getOverride(block, value->child(0)).isNonZero) {
+                        value->replaceWithIdentity(
+                            m_insertionSet.insertIntConstant(valueIndex, value, 0));
+                    }
+                    break;
+                }
+
+                case NotEqual: {
+                    if (value->child(1)->isInt(0)
+                        && getOverride(block, value->child(0)).isNonZero) {
+                        value->replaceWithIdentity(
+                            m_insertionSet.insertIntConstant(valueIndex, value, 1));
+                    }
+                    break;
+                }
+
+                default:
+                    break;
+                }
+
+                for (Value*& child : value->children()) {
+                    Override override = getOverride(block, child);
+                    if (override.valueNode)
+                        child = override.valueNode;
+                }
+            }
+            m_insertionSet.execute(block);
+        }
+
+        if (changed) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+        }
+    }
+    
+private:
+    struct Override {
+        Override()
+        {
+        }
+
+        static Override constant(BasicBlock* block, int64_t value)
+        {
+            Override result;
+            result.block = block;
+            result.hasValue = true;
+            result.value = value;
+            if (value)
+                result.isNonZero = true;
+            return result;
+        }
+
+        static Override nonZero(BasicBlock* block)
+        {
+            Override result;
+            result.block = block;
+            result.isNonZero = true;
+            return result;
+        }
+
+        bool isBetterThan(const Override& override)
+        {
+            if (hasValue && !override.hasValue)
+                return true;
+            if (isNonZero && !override.isNonZero)
+                return true;
+            return false;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print("{block = ", pointerDump(block), ", value = ");
+            if (hasValue)
+                out.print(value);
+            else
+                out.print("<none>");
+            out.print(", isNonZero = ", isNonZero);
+            if (valueNode)
+                out.print(", valueNode = ", *valueNode);
+            out.print("}");
+        }
+
+        BasicBlock* block { nullptr };
+        bool hasValue { false };
+        bool isNonZero { false };
+        int64_t value { 0 };
+        Value* valueNode { nullptr };
+    };
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void foldPathConstants(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "foldPathConstants");
+    FoldPathConstants foldPathConstants(proc);
+    foldPathConstants.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3FoldPathConstants.h b/b3/B3FoldPathConstants.h
new file mode 100644
index 0000000..a55c770
--- /dev/null
+++ b/b3/B3FoldPathConstants.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Does very basic simplification of uses of values that were branched on by a dominating branch.
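+//
+// For example (illustrative IR): after
+//     Branch(@x, #then, #else)
+// and provided each successor has a single predecessor, uses of @x dominated by the #else
+// block are replaced with the constant 0, uses dominated by the #then block are known to
+// be non-zero, and a check such as Equal(@x, 0) in the #then block folds to 0.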
+
+void foldPathConstants(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3FrequencyClass.cpp b/b3/B3FrequencyClass.cpp
new file mode 100644
index 0000000..816850c
--- /dev/null
+++ b/b3/B3FrequencyClass.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3FrequencyClass.h"
+
+#if ENABLE(B3_JIT)
+
+#include 
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, FrequencyClass frequency)
+{
+    switch (frequency) {
+    case FrequencyClass::Normal:
+        out.print("Normal");
+        return;
+    case FrequencyClass::Rare:
+        out.print("Rare");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3FrequencyClass.h b/b3/B3FrequencyClass.h
new file mode 100644
index 0000000..607c439
--- /dev/null
+++ b/b3/B3FrequencyClass.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+enum class FrequencyClass : uint8_t {
+    // We don't have any hypothesis about the frequency of this control flow construct. This is
+    // the common case. We can still use basic block frequency in this case.
+    Normal,
+
+    // We expect that this control flow construct will be reached super rarely. It's valid to
+    // perform optimizations that punish Rare code. Note that there will be situations where you
+    // have to somehow construct a new frequency class from a merging of multiple classes. When
+    // this happens, never choose Rare; always go with Normal. This is necessary because we
+    // really do punish Rare code very badly.
+    Rare
+};
+
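+// For example, when merging the frequencies of two edges:
+//     maxFrequency(FrequencyClass::Normal, FrequencyClass::Rare) == FrequencyClass::Normal
+//     maxFrequency(FrequencyClass::Rare, FrequencyClass::Rare) == FrequencyClass::Rare
+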
+inline FrequencyClass maxFrequency(FrequencyClass a, FrequencyClass b)
+{
+    if (a == FrequencyClass::Normal)
+        return FrequencyClass::Normal;
+    return b;
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::FrequencyClass);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3FrequentedBlock.h b/b3/B3FrequentedBlock.h
new file mode 100644
index 0000000..9b63ff4
--- /dev/null
+++ b/b3/B3FrequentedBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3GenericFrequentedBlock.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+
+typedef GenericFrequentedBlock<BasicBlock> FrequentedBlock;
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Generate.cpp b/b3/B3Generate.cpp
new file mode 100644
index 0000000..e328c6a
--- /dev/null
+++ b/b3/B3Generate.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Generate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerate.h"
+#include "AirInstInlines.h"
+#include "B3Common.h"
+#include "B3DuplicateTails.h"
+#include "B3EliminateCommonSubexpressions.h"
+#include "B3FixSSA.h"
+#include "B3FoldPathConstants.h"
+#include "B3InferSwitches.h"
+#include "B3LegalizeMemoryOffsets.h"
+#include "B3LowerMacros.h"
+#include "B3LowerMacrosAfterOptimizations.h"
+#include "B3LowerToAir.h"
+#include "B3MoveConstants.h"
+#include "B3Procedure.h"
+#include "B3ReduceDoubleToFloat.h"
+#include "B3ReduceStrength.h"
+#include "B3TimingScope.h"
+#include "B3Validate.h"
+#include "PCToCodeOriginMap.h"
+
+namespace JSC { namespace B3 {
+
+void prepareForGeneration(Procedure& procedure, unsigned optLevel)
+{
+    TimingScope timingScope("prepareForGeneration");
+
+    generateToAir(procedure, optLevel);
+    Air::prepareForGeneration(procedure.code());
+}
+
+void generate(Procedure& procedure, CCallHelpers& jit)
+{
+    Air::generate(procedure.code(), jit);
+}
+
+void generateToAir(Procedure& procedure, unsigned optLevel)
+{
+    TimingScope timingScope("generateToAir");
+    
+    if (shouldDumpIR(B3Mode) && !shouldDumpIRAtEachPhase(B3Mode)) {
+        dataLog("Initial B3:\n");
+        dataLog(procedure);
+    }
+
+    // We don't require the incoming IR to have predecessors computed.
+    procedure.resetReachability();
+    
+    if (shouldValidateIR())
+        validate(procedure);
+
+    if (optLevel >= 1) {
+        reduceDoubleToFloat(procedure);
+        reduceStrength(procedure);
+        eliminateCommonSubexpressions(procedure);
+        inferSwitches(procedure);
+        duplicateTails(procedure);
+        fixSSA(procedure);
+        foldPathConstants(procedure);
+        
+        // FIXME: Add more optimizations here.
+        // https://bugs.webkit.org/show_bug.cgi?id=150507
+    }
+
+    lowerMacros(procedure);
+
+    if (optLevel >= 1) {
+        reduceStrength(procedure);
+
+        // FIXME: Add more optimizations here.
+        // https://bugs.webkit.org/show_bug.cgi?id=150507
+    }
+
+    lowerMacrosAfterOptimizations(procedure);
+    legalizeMemoryOffsets(procedure);
+    moveConstants(procedure);
+
+    // FIXME: We should run pureCSE here to clean up some platform specific changes from the previous phases.
+    // https://bugs.webkit.org/show_bug.cgi?id=164873
+
+    if (shouldValidateIR())
+        validate(procedure);
+    
+    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
+    // Note that lowerToAir() acts like a phase in this regard.
+    if (shouldDumpIR(B3Mode) && !shouldDumpIRAtEachPhase(B3Mode)) {
+        dataLog("B3 after ", procedure.lastPhaseName(), ", before generation:\n");
+        dataLog(procedure);
+    }
+
+    lowerToAir(procedure);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Generate.h b/b3/B3Generate.h
new file mode 100644
index 0000000..2ffcd0e
--- /dev/null
+++ b/b3/B3Generate.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC {
+
+class CCallHelpers;
+
+namespace B3 {
+
+class Procedure;
+namespace Air { class Code; }
+
+// This takes a B3::Procedure, optimizes it in-place, lowers it to Air, and prepares the Air for
+// generation.
+JS_EXPORT_PRIVATE void prepareForGeneration(Procedure&, unsigned optLevel = 1);
+
+// This takes a B3::Procedure that has been prepared for generation (i.e. it has been lowered to Air and
+// the Air has been prepared for generation) and generates it. This is the equivalent of calling
+// Air::generate() on the Procedure::code().
+JS_EXPORT_PRIVATE void generate(Procedure&, CCallHelpers&);
+
+// This takes a B3::Procedure, optimizes it in-place, and lowers it to Air. You can then generate
+// the Air to machine code using Air::prepareForGeneration() and Air::generate() on the Procedure's
+// code().
+void generateToAir(Procedure&, unsigned optLevel = 1);
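+
+// Typical use, as a sketch (the CCallHelpers setup is elided and depends on the client):
+//     Procedure proc;
+//     ... build B3 IR into proc ...
+//     prepareForGeneration(proc);
+//     CCallHelpers jit(...);
+//     generate(proc, jit);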
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3GenericFrequentedBlock.h b/b3/B3GenericFrequentedBlock.h
new file mode 100644
index 0000000..1c5e75c
--- /dev/null
+++ b/b3/B3GenericFrequentedBlock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequencyClass.h"
+#include 
+
+namespace JSC { namespace B3 {
+
+// A frequented block is a tuple of BasicBlock* and FrequencyClass. It's usually used as a
+// successor edge.
+
+template<typename BasicBlock>
+class GenericFrequentedBlock {
+public:
+    GenericFrequentedBlock(
+        BasicBlock* block = nullptr, FrequencyClass frequency = FrequencyClass::Normal)
+        : m_block(block)
+        , m_frequency(frequency)
+    {
+    }
+
+    bool operator==(const GenericFrequentedBlock& other) const
+    {
+        return m_block == other.m_block
+            && m_frequency == other.m_frequency;
+    }
+
+    bool operator!=(const GenericFrequentedBlock& other) const
+    {
+        return !(*this == other);
+    }
+
+    explicit operator bool() const
+    {
+        return *this != GenericFrequentedBlock();
+    }
+
+    BasicBlock* block() const { return m_block; }
+    BasicBlock*& block() { return m_block; }
+    FrequencyClass frequency() const { return m_frequency; }
+    FrequencyClass& frequency() { return m_frequency; }
+
+    bool isRare() const { return frequency() == FrequencyClass::Rare; }
+
+    void dump(PrintStream& out) const
+    {
+        if (frequency() != FrequencyClass::Normal)
+            out.print(frequency(), ":");
+        out.print(pointerDump(m_block));
+    }
+
+private:
+    BasicBlock* m_block;
+    FrequencyClass m_frequency;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3HeapRange.cpp b/b3/B3HeapRange.cpp
new file mode 100644
index 0000000..a5768f9
--- /dev/null
+++ b/b3/B3HeapRange.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3HeapRange.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+void HeapRange::dump(PrintStream& out) const
+{
+    if (*this == HeapRange()) {
+        out.print("Bottom");
+        return;
+    }
+    if (*this == top()) {
+        out.print("Top");
+        return;
+    }
+    out.print(m_begin, "...", m_end);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3HeapRange.h b/b3/B3HeapRange.h
new file mode 100644
index 0000000..03866bd
--- /dev/null
+++ b/b3/B3HeapRange.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <limits.h>
+#include <wtf/MathExtras.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// Alias analysis in B3 is done by checking if two integer ranges overlap. This is powerful enough
+// to be used for TBAA-style alias analysis used by the DFG, FTL, and LLVM: you just turn each node
+// in the tree of abstract heaps into a pre/post range.
+//
+// Note that the 'begin' is inclusive, while the 'end' is exclusive. These two ranges are non-
+// overlapping:
+//
+//     rangeA = 0...8
+//     rangeB = 8...16
+
+class HeapRange {
+public:
+    typedef unsigned Type;
+    
+    HeapRange()
+        : m_begin(0)
+        , m_end(0)
+    {
+    }
+
+    explicit HeapRange(unsigned value)
+        : m_begin(value)
+        , m_end(value + 1)
+    {
+        ASSERT(m_end >= m_begin);
+    }
+
+    HeapRange(unsigned begin, unsigned end)
+        : m_begin(begin)
+        , m_end(end)
+    {
+        ASSERT(m_end >= m_begin);
+        if (m_begin == m_end) {
+            // Canonicalize empty ranges.
+            m_begin = 0;
+            m_end = 0;
+        }
+    }
+
+    static HeapRange top()
+    {
+        return HeapRange(0, UINT_MAX);
+    }
+
+    bool operator==(const HeapRange& other) const
+    {
+        return m_begin == other.m_begin
+            && m_end == other.m_end;
+    }
+
+    bool operator!=(const HeapRange& other) const
+    {
+        return !(*this == other);
+    }
+    
+    explicit operator bool() const { return m_begin != m_end; }
+
+    unsigned begin() const { return m_begin; }
+    unsigned end() const { return m_end; }
+
+    bool overlaps(const HeapRange& other) const
+    {
+        return WTF::rangesOverlap(m_begin, m_end, other.m_begin, other.m_end);
+    }
+
+    JS_EXPORT_PRIVATE void dump(PrintStream& out) const;
+
+private:
+    unsigned m_begin;
+    unsigned m_end;
+};
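+
+// Illustrative sketch of the conventions above:
+//
+//     HeapRange a(0, 8);
+//     HeapRange b(8, 16);
+//     a.overlaps(b)             // false: 'end' is exclusive, so 0...8 and 8...16 are disjoint.
+//     a.overlaps(HeapRange(4))  // true: HeapRange(4) is the one-slot range 4...5.
+//     HeapRange().overlaps(a)   // false: the empty (bottom) range overlaps nothing.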
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3InferSwitches.cpp b/b3/B3InferSwitches.cpp
new file mode 100644
index 0000000..2f17812
--- /dev/null
+++ b/b3/B3InferSwitches.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3InferSwitches.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3SwitchValue.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+const bool verbose = false;
+
+class InferSwitches {
+public:
+    InferSwitches(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+        , m_useCounts(proc)
+    {
+    }
+    
+    bool run()
+    {
+        if (verbose)
+            dataLog("B3 before inferSwitches:\n", m_proc);
+        
+        bool changed = true;
+        bool everChanged = false;
+        while (changed) {
+            changed = false;
+            
+            if (verbose)
+                dataLog("Performing fixpoint iteration:\n");
+            
+            for (BasicBlock* block : m_proc)
+                changed |= attemptToMergeWithPredecessor(block);
+
+            everChanged |= changed;
+        }
+        
+        if (everChanged) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+            
+            m_proc.deleteOrphans();
+            
+            if (verbose)
+                dataLog("B3 after inferSwitches:\n", m_proc);
+            return true;
+        }
+        
+        return false;
+    }
+    
+private:
+    bool attemptToMergeWithPredecessor(BasicBlock* block)
+    {
+        // No point in considering the root block. We also don't consider blocks with multiple
+        // predecessors, but we could handle this if we made this code a bit more general and we were
+        // not afraid of code bloat.
+        if (block->numPredecessors() != 1)
+            return false;
+        
+        SwitchDescription description = describe(block);
+        if (verbose)
+            dataLog("Description of primary block ", *block, ": ", description, "\n");
+        if (!description) {
+            if (verbose)
+                dataLog("    Bailing because not switch-like.\n");
+            return false;
+        }
+        
+        // We know that this block behaves like a switch. But we need to verify that it doesn't also
+        // perform any effects or do expensive things. We don't want to create a switch if that will
+        // make expensive things execute unconditionally. We're very conservative about how we define
+        // "expensive".
+        for (Value* value : *block) {
+            if (value->isFree())
+                continue;
+            if (value == description.extra)
+                continue;
+            if (value == description.branch)
+                continue;
+            if (verbose)
+                dataLog("    Bailing because of ", deepDump(m_proc, value), "\n");
+            return false;
+        }
+        
+        BasicBlock* predecessor = block->predecessor(0);
+        SwitchDescription predecessorDescription = describe(predecessor);
+        if (verbose)
+            dataLog("    Description of predecessor block ", *predecessor, ": ", predecessorDescription, "\n");
+        if (!predecessorDescription) {
+            if (verbose)
+                dataLog("    Bailing because not switch-like.\n");
+            return false;
+        }
+        
+        // Both us and the predecessor are switch-like, but that doesn't mean that we're compatible.
+        // We may be switching on different values!
+        if (description.source != predecessorDescription.source) {
+            if (verbose)
+                dataLog("    Bailing because sources don't match.\n");
+            return false;
+        }
+        
+        // We expect that we are the fall-through destination of the predecessor. This is a bit of a
+        // goofy condition. If we were not the fall-through destination then our switch is probably
+        // just totally redundant and we should be getting rid of it. But we don't handle that here,
+        // yet.
+        if (predecessorDescription.fallThrough.block() != block) {
+            if (verbose)
+                dataLog("    Bailing because fall-through of predecessor is not the primary block.\n");
+            return false;
+        }
+        
+        // Make sure that there ain't no loops.
+        if (description.fallThrough.block() == block
+            || description.fallThrough.block() == predecessor) {
+            if (verbose)
+                dataLog("    Bailing because of fall-through loop.\n");
+            return false;
+        }
+        for (SwitchCase switchCase : description.cases) {
+            if (switchCase.targetBlock() == block
+                || switchCase.targetBlock() == predecessor) {
+                if (verbose)
+                    dataLog("    Bailing because of loop in primary cases.\n");
+                return false;
+            }
+        }
+        for (SwitchCase switchCase : predecessorDescription.cases) {
+            if (switchCase.targetBlock() == block
+                || switchCase.targetBlock() == predecessor) {
+                if (verbose)
+                    dataLog("    Bailing because of loop in predecessor cases.\n");
+                return false;
+            }
+        }
+        
+        if (verbose)
+            dataLog("    Doing it!\n");
+        // We're committed to doing the thing.
+        
+        // Delete the extra value from the predecessor, since leaving it behind would break downstream
+        // inference on the next fixpoint iteration. We would think that this block is too expensive to
+        // merge because of the Equal or NotEqual value even though that value is dead! We know it's
+        // dead, so we kill it ourselves.
+        for (Value* value : *predecessor) {
+            if (value == predecessorDescription.extra)
+                value->replaceWithNopIgnoringType();
+        }
+        
+        // Insert all non-terminal values from our block into our predecessor. We definitely need to
+        // do this for constants. We must not do it for the extra value, since that would break
+        // downstream inference on the next fixpoint iteration. As a bonus, we don't do it for nops,
+        // so that we limit how big blocks get in this phase.
+        for (unsigned i = 0; i < block->size() - 1; ++i) {
+            Value* value = block->at(i);
+            if (value != description.extra && value->opcode() != Nop)
+                m_insertionSet.insertValue(predecessor->size() - 1, value);
+        }
+        m_insertionSet.execute(predecessor);
+        block->values().resize(0);
+        block->appendNew<Value>(m_proc, Oops, description.branch->origin());
+        block->removePredecessor(predecessor);
+        
+        for (BasicBlock* successorBlock : description.block->successorBlocks())
+            successorBlock->replacePredecessor(block, predecessor);
+
+        block->clearSuccessors();
+        
+        SwitchValue* switchValue = predecessor->replaceLastWithNew<SwitchValue>(
+            m_proc, predecessor->last()->origin(), description.source);
+        predecessor->clearSuccessors();
+        switchValue->setFallThrough(description.fallThrough);
+        
+        Vector<int64_t> predecessorCases;
+        for (SwitchCase switchCase : predecessorDescription.cases) {
+            switchValue->appendCase(switchCase);
+            predecessorCases.append(switchCase.caseValue());
+        }
+        std::sort(predecessorCases.begin(), predecessorCases.end());
+        auto isPredecessorCase = [&] (int64_t value) -> bool {
+            return !!tryBinarySearch<int64_t>(
+                predecessorCases, predecessorCases.size(), value,
+                [] (int64_t* element) -> int64_t { return *element; });
+        };
+        
+        for (SwitchCase switchCase : description.cases) {
+            if (!isPredecessorCase(switchCase.caseValue()))
+                switchValue->appendCase(switchCase);
+        }
+        return true;
+    }
+
+    struct SwitchDescription {
+        SwitchDescription()
+        {
+        }
+        
+        explicit operator bool() { return !!block; }
+        
+        void dump(PrintStream& out) const
+        {
+            out.print(
+                "{block = ", pointerDump(block),
+                ", branch = ", pointerDump(branch),
+                ", extra = ", pointerDump(extra),
+                ", source = ", pointerDump(source),
+                ", cases = ", listDump(cases),
+                ", fallThrough = ", fallThrough, "}");
+        }
+
+        BasicBlock* block { nullptr };
+        Value* branch { nullptr };
+        Value* extra { nullptr }; // This is the Equal or NotEqual value, if applicable.
+        Value* source { nullptr };
+        Vector<SwitchCase> cases;
+        FrequentedBlock fallThrough;
+    };
+    
+    SwitchDescription describe(BasicBlock* block)
+    {
+        SwitchDescription result;
+        result.block = block;
+        result.branch = block->last();
+        
+        switch (result.branch->opcode()) {
+        case Branch: {
+            Value* predicate = result.branch->child(0);
+            FrequentedBlock taken = result.block->taken();
+            FrequentedBlock notTaken = result.block->notTaken();
+            bool handled = false;
+            // NOTE: This uses UseCounts that we computed before any transformation. This is fine
+            // because although we may have mutated the IR, we would not have added any new
+            // predicates.
+            if (predicate->numChildren() == 2
+                && predicate->child(1)->hasInt()
+                && m_useCounts.numUses(predicate) == 1) {
+                switch (predicate->opcode()) {
+                case Equal:
+                    result.source = predicate->child(0);
+                    result.extra = predicate;
+                    result.cases.append(SwitchCase(predicate->child(1)->asInt(), taken));
+                    result.fallThrough = notTaken;
+                    handled = true;
+                    break;
+                case NotEqual:
+                    result.source = predicate->child(0);
+                    result.extra = predicate;
+                    result.cases.append(SwitchCase(predicate->child(1)->asInt(), notTaken));
+                    result.fallThrough = taken;
+                    handled = true;
+                    break;
+                default:
+                    break;
+                }
+            }
+            if (handled)
+                break;
+            result.source = predicate;
+            result.cases.append(SwitchCase(0, notTaken));
+            result.fallThrough = taken;
+            break;
+        }
+            
+        case Switch: {
+            SwitchValue* switchValue = result.branch->as<SwitchValue>();
+            result.source = switchValue->child(0);
+            for (SwitchCase switchCase : switchValue->cases(result.block))
+                result.cases.append(switchCase);
+            result.fallThrough = result.block->fallThrough();
+            break;
+        }
+            
+        default:
+            result.block = nullptr;
+            result.branch = nullptr;
+            break;
+        }
+        
+        return result;
+    }
+    
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+    UseCounts m_useCounts;
+};
+
+} // anonymous namespace
+
+bool inferSwitches(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "inferSwitches");
+    InferSwitches inferSwitches(proc);
+    return inferSwitches.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3InferSwitches.h b/b3/B3InferSwitches.h
new file mode 100644
index 0000000..d0466f8
--- /dev/null
+++ b/b3/B3InferSwitches.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Fixpoints to convert chains of branches into switches.
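+//
+// For example (sketch), two blocks that test the same value in a chain:
+//
+//     #1: Branch(Equal(x, 1)), then: A, else: #2
+//     #2: Branch(Equal(x, 2)), then: B, else: C
+//
+// can be folded into a single Switch(x) with cases 1 -> A and 2 -> B and fall-through C.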
+
+bool inferSwitches(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3InsertionSet.cpp b/b3/B3InsertionSet.cpp
new file mode 100644
index 0000000..a6e119f
--- /dev/null
+++ b/b3/B3InsertionSet.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3InsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 {
+
+Value* InsertionSet::insertIntConstant(size_t index, Origin origin, Type type, int64_t value)
+{
+    return insertValue(index, m_procedure.addIntConstant(origin, type, value));
+}
+
+Value* InsertionSet::insertIntConstant(size_t index, Value* likeValue, int64_t value)
+{
+    return insertIntConstant(index, likeValue->origin(), likeValue->type(), value);
+}
+
+Value* InsertionSet::insertBottom(size_t index, Origin origin, Type type)
+{
+    Value*& bottom = m_bottomForType[type];
+    if (!bottom)
+        bottom = insertValue(index, m_procedure.addBottom(origin, type));
+    return bottom;
+}
+
+Value* InsertionSet::insertBottom(size_t index, Value* likeValue)
+{
+    return insertBottom(index, likeValue->origin(), likeValue->type());
+}
+
+void InsertionSet::execute(BasicBlock* block)
+{
+    bubbleSort(m_insertions.begin(), m_insertions.end());
+    executeInsertions(block->m_values, m_insertions);
+    m_bottomForType = TypeMap();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3InsertionSet.h b/b3/B3InsertionSet.h
new file mode 100644
index 0000000..1eb5272
--- /dev/null
+++ b/b3/B3InsertionSet.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+#include "B3Type.h"
+#include "B3TypeMap.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Procedure;
+class Value;
+
+typedef WTF::Insertion<Value*> Insertion;
+
+class InsertionSet {
+public:
+    InsertionSet(Procedure& procedure)
+        : m_procedure(procedure)
+    {
+    }
+
+    bool isEmpty() const { return m_insertions.isEmpty(); }
+
+    Procedure& code() { return m_procedure; }
+
+    void appendInsertion(const Insertion& insertion)
+    {
+        m_insertions.append(insertion);
+    }
+
+    Value* insertValue(size_t index, Value* value)
+    {
+        appendInsertion(Insertion(index, value));
+        return value;
+    }
+
+    template<typename ValueType, typename... Arguments>
+    ValueType* insert(size_t index, Arguments... arguments);
+
+    Value* insertIntConstant(size_t index, Origin, Type, int64_t value);
+    Value* insertIntConstant(size_t index, Value* likeValue, int64_t value);
+
+    Value* insertBottom(size_t index, Origin, Type);
+    Value* insertBottom(size_t index, Value*);
+
+    void execute(BasicBlock*);
+
+private:
+    Procedure& m_procedure;
+    Vector<Insertion> m_insertions;
+
+    TypeMap<Value*> m_bottomForType;
+};
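+
+// Typical usage (illustrative sketch; "proc" and "block" stand for a Procedure and one of its
+// BasicBlocks): batch up insertions while walking a block by index, then apply them all at once
+// so that indices stay stable during the walk.
+//
+//     InsertionSet insertionSet(proc);
+//     for (size_t index = 0; index < block->size(); ++index) {
+//         // ... decide that something should go in front of block->at(index) ...
+//         insertionSet.insertIntConstant(index, block->at(index), 42);
+//     }
+//     insertionSet.execute(block);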
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3InsertionSetInlines.h b/b3/B3InsertionSetInlines.h
new file mode 100644
index 0000000..c5b03df
--- /dev/null
+++ b/b3/B3InsertionSetInlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3InsertionSet.h"
+#include "B3ProcedureInlines.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* InsertionSet::insert(size_t index, Arguments... arguments)
+{
+    return static_cast<ValueType*>(insertValue(index, m_procedure.add<ValueType>(arguments...)));
+}
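+
+// For example (sketch), the phases in this patch use this helper as:
+//
+//     insertionSet.insert<Value>(index, Add, origin, left, right);
+//
+// which builds the new value via Procedure::add<Value>() and schedules it in front of position
+// 'index' of whichever block execute() is later called on.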
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Kind.cpp b/b3/B3Kind.cpp
new file mode 100644
index 0000000..147ab23
--- /dev/null
+++ b/b3/B3Kind.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Kind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 {
+
+void Kind::dump(PrintStream& out) const
+{
+    out.print(m_opcode);
+    
+    CommaPrinter comma(", ", "<");
+    if (isChill())
+        out.print(comma, "Chill");
+    if (traps())
+        out.print(comma, "Traps");
+    if (comma.didPrint())
+        out.print(">");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Kind.h b/b3/B3Kind.h
new file mode 100644
index 0000000..268c8e7
--- /dev/null
+++ b/b3/B3Kind.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef B3Kind_h
+#define B3Kind_h
+
+#if ENABLE(B3_JIT)
+
+#include "B3Opcode.h"
+#include <wtf/HashTable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+// A Kind is a terse summary of what a Value does. There is a fixed number of possible
+// Kinds. Kind is a tuple of Opcode (see B3Opcode.h) and some extra bits. Most opcodes don't
+// get any extra bits, and those bits must remain zero if the Kind's opcode field is set to
+// one of those opcodes. The purpose of Kind is to be like an opcode in other IRs, but to
+// be multidimensional. For example, a Load has many dimensions of customization that we may
+// eventually implement. A Load can have different alignments, alignment failure modes,
+// temporality modes, trapping modes, ordering modes, etc. It's fine to put such flags into
+// subclasses of Value, but in some cases that would be overkill, particularly since if you
+// did that for a pure value then you'd also have to thread it through ValueKey. It's much
+// easier to put it in Kind, and then your extra bit will get carried around by everyone who
+// knows how to carry around Kinds. Most importantly, putting flags into Kind allows you to
+// use them as part of B3::Value's dynamic cast facility. For example we could have a
+// trapping Load that uses a Value subclass that has a stackmap while non-trapping Loads
+// continue to use the normal MemoryValue.
+//
+// Note that any code in the compiler that transcribes IR (like a strength reduction that
+// replaces an Add with a different Add, or even with a different opcode entirely) will
+// probably drop unknown bits by default. This is definitely not correct for many bits (like
+// isChill for Div/Mod and all of the envisioned Load/Store flags), so if you add a new bit
+// you will probably have to audit the compiler to make sure that phases that transcribe
+// your opcode do the right thing with your bit.
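+//
+// A small illustration of the tuple nature of Kind (sketch):
+//
+//     Kind plain = Load;
+//     Kind checked = trapping(Load);     // see trapping() below
+//     plain == checked                   // false: same opcode, different traps bit
+//     plain.opcode() == checked.opcode() // true: both are Load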
+
+class Kind {
+public:
+    Kind(Opcode opcode)
+        : m_opcode(opcode)
+        , m_isChill(false)
+        , m_traps(false)
+    {
+    }
+    
+    Kind()
+        : Kind(Oops)
+    {
+    }
+    
+    Opcode opcode() const { return m_opcode; }
+    void setOpcode(Opcode opcode) { m_opcode = opcode; }
+    
+    bool hasExtraBits() const { return m_isChill || m_traps; }
+    
+    // Chill bit. This applies to division-based arithmetic ops, which may trap on some
+    // platforms or exhibit bizarre behavior when passed certain inputs. The non-chill
+    // version will behave as unpredictably as it wants. For example, it's legal to
+    // constant-fold Div(x, 0) to any value or to replace it with any effectful operation.
+    // But when it's chill, that means that the semantics when it would have trapped are
+    // the JS semantics. For example, Div(@a, @b) means:
+    //
+    //     ((a | 0) / (b | 0)) | 0
+    //
+    // And Mod(a, b) means:
+    //
+    //     ((a | 0) % (b | 0)) | 0
+    //
+    // Note that Div matches exactly how ARM handles integer division.
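+    //
+    // For the 32-bit case, for example, this means that chill Div(x, 0) produces 0 and
+    // chill Div(INT_MIN, -1) produces INT_MIN, exactly as ((a | 0) / (b | 0)) | 0 would in JS.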
+    bool hasIsChill() const
+    {
+        switch (m_opcode) {
+        case Div:
+        case Mod:
+            return true;
+        default:
+            return false;
+        }
+    }
+    bool isChill() const
+    {
+        return m_isChill;
+    }
+    void setIsChill(bool isChill)
+    {
+        ASSERT(hasIsChill());
+        m_isChill = isChill;
+    }
+    
+    // Traps bit. This applies to memory access ops. It means that the instruction could
+    // trap as part of some check it performs, and that we mean to make this observable. This
+    // currently only applies to memory accesses (loads and stores). You don't get to find out where
+    // in the Procedure the trap happened. If you try to work it out using Origin, you'll have a bad
+    // time because the instruction selector is too sloppy with Origin().
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=162688
+    bool hasTraps() const
+    {
+        switch (m_opcode) {
+        case Load8Z:
+        case Load8S:
+        case Load16Z:
+        case Load16S:
+        case Load:
+        case Store8:
+        case Store16:
+        case Store:
+            return true;
+        default:
+            return false;
+        }
+    }
+    bool traps() const
+    {
+        return m_traps;
+    }
+    void setTraps(bool traps)
+    {
+        ASSERT(hasTraps());
+        m_traps = traps;
+    }
+    
+    // Rules for adding new properties:
+    // - Put the accessors here.
+    // - hasBlah() should check if the opcode allows for your property.
+    // - blah() returns a default value if !hasBlah()
+    // - setBlah() asserts if !hasBlah()
+    // - Try not to increase the size of Kind too much. But it wouldn't be the end of the
+    //   world if it bloated to 64 bits.
+    
+    bool operator==(const Kind& other) const
+    {
+        return m_opcode == other.m_opcode
+            && m_isChill == other.m_isChill
+            && m_traps == other.m_traps;
+    }
+    
+    bool operator!=(const Kind& other) const
+    {
+        return !(*this == other);
+    }
+    
+    void dump(PrintStream&) const;
+    
+    unsigned hash() const
+    {
+        // It's almost certainly more important that this hash function is cheap to compute than
+        // anything else. We can live with some kind hash collisions.
+        return m_opcode + (static_cast<unsigned>(m_isChill) << 16) + (static_cast<unsigned>(m_traps) << 7);
+    }
+    
+    Kind(WTF::HashTableDeletedValueType)
+        : m_opcode(Oops)
+        , m_isChill(true)
+        , m_traps(false)
+    {
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return *this == Kind(WTF::HashTableDeletedValue);
+    }
+    
+private:
+    Opcode m_opcode;
+    bool m_isChill : 1;
+    bool m_traps : 1;
+};
+
+// For every flag 'foo' you add, it's customary to create a Kind B3::foo(Kind) function that makes
+// a kind with the flag set. For example, for chill, this lets us say:
+//
+//     block->appendNew(m_proc, chill(Mod), Origin(), a, b);
+//
+// I like to make the flag name fill in the sentence "Mod _____" (like "isChill" or "traps") while
+// the flag constructor fills in the phrase "_____ Mod" (like "chill" or "trapping").
+
+inline Kind chill(Kind kind)
+{
+    kind.setIsChill(true);
+    return kind;
+}
+
+inline Kind trapping(Kind kind)
+{
+    kind.setTraps(true);
+    return kind;
+}
+
+struct KindHash {
+    static unsigned hash(const Kind& key) { return key.hash(); }
+    static bool equal(const Kind& a, const Kind& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Kind> {
+    typedef JSC::B3::KindHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Kind> : public SimpleClassHashTraits<JSC::B3::Kind> {
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
+
+#endif // B3Kind_h
+
diff --git a/b3/B3LegalizeMemoryOffsets.cpp b/b3/B3LegalizeMemoryOffsets.cpp
new file mode 100644
index 0000000..8c17ff5
--- /dev/null
+++ b/b3/B3LegalizeMemoryOffsets.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3LegalizeMemoryOffsets.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LegalizeMemoryOffsets {
+public:
+    LegalizeMemoryOffsets(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    void run()
+    {
+        if (!isARM64())
+            return;
+
+        for (BasicBlock* block : m_proc) {
+            for (unsigned index = 0; index < block->size(); ++index) {
+                MemoryValue* memoryValue = block->at(index)->as<MemoryValue>();
+                if (!memoryValue)
+                    continue;
+
+                int32_t offset = memoryValue->offset();
+                Air::Arg::Width width = Air::Arg::widthForBytes(memoryValue->accessByteSize());
+                if (!Air::Arg::isValidAddrForm(offset, width)) {
+                    Value* base = memoryValue->lastChild();
+                    Value* offsetValue = m_insertionSet.insertIntConstant(index, memoryValue->origin(), pointerType(), offset);
+                    Value* resolvedAddress = m_proc.add<Value>(Add, memoryValue->origin(), base, offsetValue);
+                    m_insertionSet.insertValue(index, resolvedAddress);
+
+                    memoryValue->lastChild() = resolvedAddress;
+                    memoryValue->setOffset(0);
+                }
+            }
+            m_insertionSet.execute(block);
+        }
+    }
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void legalizeMemoryOffsets(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "legalizeMemoryOffsets");
+    LegalizeMemoryOffsets legalizeMemoryOffsets(proc);
+    legalizeMemoryOffsets.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3LegalizeMemoryOffsets.h b/b3/B3LegalizeMemoryOffsets.h
new file mode 100644
index 0000000..c482ab2
--- /dev/null
+++ b/b3/B3LegalizeMemoryOffsets.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// If the offset of a MemoryValue cannot be represented in the target's addressing modes,
+// compute the address explicitly.
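+//
+// For example (sketch), on ARM64 a load whose offset 0x12345 does not fit the addressing mode
+// becomes:
+//
+//     address = Add(base, 0x12345)
+//     Load(address, offset = 0)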
+void legalizeMemoryOffsets(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3LowerMacros.cpp b/b3/B3LowerMacros.cpp
new file mode 100644
index 0000000..6841510
--- /dev/null
+++ b/b3/B3LowerMacros.cpp
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3LowerMacros.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3CCallValue.h"
+#include "B3CaseCollectionInlines.h"
+#include "B3ConstPtrValue.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "CCallHelpers.h"
+#include "LinkBuffer.h"
+#include <cmath>
+#include <wtf/BitVector.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LowerMacros {
+public:
+    LowerMacros(Procedure& proc)
+        : m_proc(proc)
+        , m_blockInsertionSet(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        for (BasicBlock* block : m_proc) {
+            m_block = block;
+            processCurrentBlock();
+        }
+        m_changed |= m_blockInsertionSet.execute();
+        if (m_changed) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+        }
+        return m_changed;
+    }
+    
+private:
+    void processCurrentBlock()
+    {
+        for (m_index = 0; m_index < m_block->size(); ++m_index) {
+            m_value = m_block->at(m_index);
+            m_origin = m_value->origin();
+            switch (m_value->opcode()) {
+            case Mod: {
+                if (m_value->isChill()) {
+                    if (isARM64()) {
+                        BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+                        BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block);
+                        BasicBlock* normalModCase = m_blockInsertionSet.insertBefore(m_block);
+
+                        before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, m_value->child(1));
+                        before->setSuccessors(
+                            FrequentedBlock(normalModCase, FrequencyClass::Normal),
+                            FrequentedBlock(zeroDenCase, FrequencyClass::Rare));
+
+                        Value* divResult = normalModCase->appendNew<Value>(m_proc, chill(Div), m_origin, m_value->child(0), m_value->child(1));
+                        Value* multipliedBack = normalModCase->appendNew<Value>(m_proc, Mul, m_origin, divResult, m_value->child(1));
+                        Value* result = normalModCase->appendNew<Value>(m_proc, Sub, m_origin, m_value->child(0), multipliedBack);
+                        UpsilonValue* normalResult = normalModCase->appendNew<UpsilonValue>(m_proc, m_origin, result);
+                        normalModCase->appendNew<Value>(m_proc, Jump, m_origin);
+                        normalModCase->setSuccessors(FrequentedBlock(m_block));
+
+                        UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>(
+                            m_proc, m_origin,
+                            zeroDenCase->appendIntConstant(m_proc, m_value, 0));
+                        zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin);
+                        zeroDenCase->setSuccessors(FrequentedBlock(m_block));
+
+                        Value* phi = m_insertionSet.insert<Value>(m_index, Phi, m_value->type(), m_origin);
+                        normalResult->setPhi(phi);
+                        zeroResult->setPhi(phi);
+                        m_value->replaceWithIdentity(phi);
+                        before->updatePredecessorsAfter();
+                        m_changed = true;
+                    } else
+                        makeDivisionChill(Mod);
+                    break;
+                }
+                
+                double (*fmodDouble)(double, double) = fmod;
+                if (m_value->type() == Double) {
+                    Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, fmodDouble);
+                    Value* result = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin,
+                        Effects::none(),
+                        functionAddress,
+                        m_value->child(0),
+                        m_value->child(1));
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                } else if (m_value->type() == Float) {
+                    Value* numeratorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(0));
+                    Value* denominatorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(1));
+                    Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, fmodDouble);
+                    Value* doubleMod = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin,
+                        Effects::none(),
+                        functionAddress,
+                        numeratorAsDouble,
+                        denominatorAsDouble);
+                    Value* result = m_insertionSet.insert<Value>(m_index, DoubleToFloat, m_origin, doubleMod);
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                } else if (isARM64()) {
+                    Value* divResult = m_insertionSet.insert<Value>(m_index, chill(Div), m_origin, m_value->child(0), m_value->child(1));
+                    Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1));
+                    Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack);
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                }
+                break;
+            }
+
+            case UMod: {
+                if (isARM64()) {
+                    Value* divResult = m_insertionSet.insert<Value>(m_index, UDiv, m_origin, m_value->child(0), m_value->child(1));
+                    Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1));
+                    Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack);
+                    m_value->replaceWithIdentity(result);
+                    m_changed = true;
+                }
+                break;
+            }
+
+            case Div: {
+                if (m_value->isChill())
+                    makeDivisionChill(Div);
+                break;
+            }
+
+            case Switch: {
+                SwitchValue* switchValue = m_value->as<SwitchValue>();
+                Vector<SwitchCase> cases;
+                for (const SwitchCase& switchCase : switchValue->cases(m_block))
+                    cases.append(switchCase);
+                std::sort(
+                    cases.begin(), cases.end(),
+                    [] (const SwitchCase& left, const SwitchCase& right) {
+                        return left.caseValue() < right.caseValue();
+                    });
+                FrequentedBlock fallThrough = m_block->fallThrough();
+                m_block->values().removeLast();
+                recursivelyBuildSwitch(cases, fallThrough, 0, false, cases.size(), m_block);
+                m_proc.deleteValue(switchValue);
+                m_block->updatePredecessorsAfter();
+                m_changed = true;
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+        m_insertionSet.execute(m_block);
+    }
+
+    void makeDivisionChill(Opcode nonChillOpcode)
+    {
+        ASSERT(nonChillOpcode == Div || nonChillOpcode == Mod);
+
+        // ARM supports this instruction natively.
+        if (isARM64())
+            return;
+
+        // We implement "res = Div/Mod(num, den)" as follows:
+        //
+        //     if (den + 1 <=_unsigned 1) {
+        //         if (!den) {
+        //             res = 0;
+        //             goto done;
+        //         }
+        //         if (num == -2147483648) {
+        //             res = isDiv ? num : 0;
+        //             goto done;
+        //         }
+        //     }
+        //     res = num (/ or %) den;
+        // done:
+        m_changed = true;
+
+        Value* num = m_value->child(0);
+        Value* den = m_value->child(1);
+
+        Value* one = m_insertionSet.insertIntConstant(m_index, m_value, 1);
+        Value* isDenOK = m_insertionSet.insert<Value>(
+            m_index, Above, m_origin,
+            m_insertionSet.insert<Value>(m_index, Add, m_origin, den, one),
+            one);
+
+        BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+
+        BasicBlock* normalDivCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* shadyDenCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* neg1DenCase = m_blockInsertionSet.insertBefore(m_block);
+        BasicBlock* intMinCase = m_blockInsertionSet.insertBefore(m_block);
+
+        before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, isDenOK);
+        before->setSuccessors(
+            FrequentedBlock(normalDivCase, FrequencyClass::Normal),
+            FrequentedBlock(shadyDenCase, FrequencyClass::Rare));
+
+        UpsilonValue* normalResult = normalDivCase->appendNew<UpsilonValue>(
+            m_proc, m_origin,
+            normalDivCase->appendNew<Value>(m_proc, nonChillOpcode, m_origin, num, den));
+        normalDivCase->appendNew<Value>(m_proc, Jump, m_origin);
+        normalDivCase->setSuccessors(FrequentedBlock(m_block));
+
+        shadyDenCase->appendNew<Value>(m_proc, Branch, m_origin, den);
+        shadyDenCase->setSuccessors(
+            FrequentedBlock(neg1DenCase, FrequencyClass::Normal),
+            FrequentedBlock(zeroDenCase, FrequencyClass::Rare));
+
+        UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>(
+            m_proc, m_origin,
+            zeroDenCase->appendIntConstant(m_proc, m_value, 0));
+        zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin);
+        zeroDenCase->setSuccessors(FrequentedBlock(m_block));
+
+        int64_t badNumeratorConst = 0;
+        switch (m_value->type()) {
+        case Int32:
+            badNumeratorConst = std::numeric_limits<int32_t>::min();
+            break;
+        case Int64:
+            badNumeratorConst = std::numeric_limits<int64_t>::min();
+            break;
+        default:
+            ASSERT_NOT_REACHED();
+            badNumeratorConst = 0;
+        }
+
+        Value* badNumerator =
+            neg1DenCase->appendIntConstant(m_proc, m_value, badNumeratorConst);
+
+        neg1DenCase->appendNew<Value>(
+            m_proc, Branch, m_origin,
+            neg1DenCase->appendNew<Value>(
+                m_proc, Equal, m_origin, num, badNumerator));
+        neg1DenCase->setSuccessors(
+            FrequentedBlock(intMinCase, FrequencyClass::Rare),
+            FrequentedBlock(normalDivCase, FrequencyClass::Normal));
+
+        Value* intMinResult = nonChillOpcode == Div ? badNumerator : intMinCase->appendIntConstant(m_proc, m_value, 0);
+        UpsilonValue* intMinResultUpsilon = intMinCase->appendNew<UpsilonValue>(
+            m_proc, m_origin, intMinResult);
+        intMinCase->appendNew<Value>(m_proc, Jump, m_origin);
+        intMinCase->setSuccessors(FrequentedBlock(m_block));
+
+        Value* phi = m_insertionSet.insert<Value>(
+            m_index, Phi, m_value->type(), m_origin);
+        normalResult->setPhi(phi);
+        zeroResult->setPhi(phi);
+        intMinResultUpsilon->setPhi(phi);
+
+        m_value->replaceWithIdentity(phi);
+        before->updatePredecessorsAfter();
+    }
+
+    void recursivelyBuildSwitch(
+        const Vector<SwitchCase>& cases, FrequentedBlock fallThrough, unsigned start, bool hardStart,
+        unsigned end, BasicBlock* before)
+    {
+        Value* child = m_value->child(0);
+        Type type = child->type();
+        
+        // It's a good idea to use a table-based switch in some cases: the number of cases has to be
+        // large enough and they have to be dense enough. This could probably be improved a lot. For
+        // example, we could still use a jump table in cases where the inputs are sparse so long as we
+        // shift off the uninteresting bits. On the other hand, it's not clear that this would
+        // actually be any better than what we have done here and it's not clear that it would be
+        // better than a binary switch.
+        const unsigned minCasesForTable = 7;
+        const unsigned densityLimit = 4;
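+        // For example, 7 cases covering the values 0..20 have density (20 - 0 + 1) / 7 = 3, which is
+        // below densityLimit, so they compile to a jump table.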
+        if (end - start >= minCasesForTable) {
+            int64_t firstValue = cases[start].caseValue();
+            int64_t lastValue = cases[end - 1].caseValue();
+            if ((lastValue - firstValue + 1) / (end - start) < densityLimit) {
+                BasicBlock* switchBlock = m_blockInsertionSet.insertAfter(m_block);
+                Value* index = before->appendNew<Value>(
+                    m_proc, Sub, m_origin, child,
+                    before->appendIntConstant(m_proc, m_origin, type, firstValue));
+                before->appendNew<Value>(
+                    m_proc, Branch, m_origin,
+                    before->appendNew<Value>(
+                        m_proc, Above, m_origin, index,
+                        before->appendIntConstant(m_proc, m_origin, type, lastValue - firstValue)));
+                before->setSuccessors(fallThrough, FrequentedBlock(switchBlock));
+                
+                size_t tableSize = lastValue - firstValue + 1;
+                
+                if (index->type() != pointerType() && index->type() == Int32)
+                    index = switchBlock->appendNew<Value>(m_proc, ZExt32, m_origin, index);
+                
+                PatchpointValue* patchpoint =
+                    switchBlock->appendNew<PatchpointValue>(m_proc, Void, m_origin);
+
+                // Even though this loads from the jump table, the jump table is immutable. For the
+                // purpose of alias analysis, reading something immutable is like reading nothing.
+                patchpoint->effects = Effects();
+                patchpoint->effects.terminal = true;
+                
+                patchpoint->appendSomeRegister(index);
+                patchpoint->numGPScratchRegisters++;
+                // Technically, we don't have to clobber macro registers on X86_64. This is probably
+                // OK though.
+                patchpoint->clobber(RegisterSet::macroScratchRegisters());
+                
+                BitVector handledIndices;
+                for (unsigned i = start; i < end; ++i) {
+                    FrequentedBlock block = cases[i].target();
+                    int64_t value = cases[i].caseValue();
+                    switchBlock->appendSuccessor(block);
+                    size_t index = value - firstValue;
+                    ASSERT(!handledIndices.get(index));
+                    handledIndices.set(index);
+                }
+                
+                bool hasUnhandledIndex = false;
+                for (unsigned i = 0; i < tableSize; ++i) {
+                    if (!handledIndices.get(i)) {
+                        hasUnhandledIndex = true;
+                        break;
+                    }
+                }
+                
+                if (hasUnhandledIndex)
+                    switchBlock->appendSuccessor(fallThrough);
+
+                patchpoint->setGenerator(
+                    [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                        AllowMacroScratchRegisterUsage allowScratch(jit);
+                        
+                        MacroAssemblerCodePtr* jumpTable = static_cast<MacroAssemblerCodePtr*>(
+                            params.proc().addDataSection(sizeof(MacroAssemblerCodePtr) * tableSize));
+                        
+                        GPRReg index = params[0].gpr();
+                        GPRReg scratch = params.gpScratch(0);
+                        
+                        jit.move(CCallHelpers::TrustedImmPtr(jumpTable), scratch);
+                        jit.jump(CCallHelpers::BaseIndex(scratch, index, CCallHelpers::timesPtr()));
+                        
+                        // These labels are guaranteed to be populated before either late paths or
+                        // link tasks run.
+                        Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+                        
+                        jit.addLinkTask(
+                            [=] (LinkBuffer& linkBuffer) {
+                                if (hasUnhandledIndex) {
+                                    MacroAssemblerCodePtr fallThrough =
+                                        linkBuffer.locationOf(*labels.last());
+                                    for (unsigned i = tableSize; i--;)
+                                        jumpTable[i] = fallThrough;
+                                }
+                                
+                                unsigned labelIndex = 0;
+                                for (unsigned tableIndex : handledIndices) {
+                                    jumpTable[tableIndex] =
+                                        linkBuffer.locationOf(*labels[labelIndex++]);
+                                }
+                            });
+                    });
+                return;
+            }
+        }
+        
+        // See comments in jit/BinarySwitch.cpp for a justification of this algorithm. The only
+        // thing we do differently is that we don't use randomness.
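+        // Runs of at most leafThreshold cases become a chain of equality checks; larger runs are
+        // split at the median case and handled recursively.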
+
+        const unsigned leafThreshold = 3;
+
+        unsigned size = end - start;
+
+        if (size <= leafThreshold) {
+            bool allConsecutive = false;
+
+            if ((hardStart || (start && cases[start - 1].caseValue() == cases[start].caseValue() - 1))
+                && end < cases.size()
+                && cases[end - 1].caseValue() == cases[end].caseValue() - 1) {
+                allConsecutive = true;
+                for (unsigned i = 0; i < size - 1; ++i) {
+                    if (cases[start + i].caseValue() + 1 != cases[start + i + 1].caseValue()) {
+                        allConsecutive = false;
+                        break;
+                    }
+                }
+            }
+
+            unsigned limit = allConsecutive ? size - 1 : size;
+            
+            for (unsigned i = 0; i < limit; ++i) {
+                BasicBlock* nextCheck = m_blockInsertionSet.insertAfter(m_block);
+                before->appendNew<Value>(
+                    m_proc, Branch, m_origin,
+                    before->appendNew<Value>(
+                        m_proc, Equal, m_origin, child,
+                        before->appendIntConstant(
+                            m_proc, m_origin, type,
+                            cases[start + i].caseValue())));
+                before->setSuccessors(cases[start + i].target(), FrequentedBlock(nextCheck));
+
+                before = nextCheck;
+            }
+
+            before->appendNew<Value>(m_proc, Jump, m_origin);
+            if (allConsecutive)
+                before->setSuccessors(cases[end - 1].target());
+            else
+                before->setSuccessors(fallThrough);
+            return;
+        }
+
+        unsigned medianIndex = (start + end) / 2;
+
+        BasicBlock* left = m_blockInsertionSet.insertAfter(m_block);
+        BasicBlock* right = m_blockInsertionSet.insertAfter(m_block);
+
+        before->appendNew<Value>(
+            m_proc, Branch, m_origin,
+            before->appendNew<Value>(
+                m_proc, LessThan, m_origin, child,
+                before->appendIntConstant(
+                    m_proc, m_origin, type,
+                    cases[medianIndex].caseValue())));
+        before->setSuccessors(FrequentedBlock(left), FrequentedBlock(right));
+
+        recursivelyBuildSwitch(cases, fallThrough, start, hardStart, medianIndex, left);
+        recursivelyBuildSwitch(cases, fallThrough, medianIndex, true, end, right);
+    }
+    
+    Procedure& m_proc;
+    BlockInsertionSet m_blockInsertionSet;
+    InsertionSet m_insertionSet;
+    BasicBlock* m_block;
+    unsigned m_index;
+    Value* m_value;
+    Origin m_origin;
+    bool m_changed { false };
+};
+
+bool lowerMacrosImpl(Procedure& proc)
+{
+    LowerMacros lowerMacros(proc);
+    return lowerMacros.run();
+}
+
+} // anonymous namespace
+
+bool lowerMacros(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "lowerMacros");
+    bool result = lowerMacrosImpl(proc);
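+    // In validation builds, re-run the phase and check that the second pass finds nothing left to lower.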
+    if (shouldValidateIR())
+        RELEASE_ASSERT(!lowerMacrosImpl(proc));
+    return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3LowerMacros.h b/b3/B3LowerMacros.h
new file mode 100644
index 0000000..f9649e2
--- /dev/null
+++ b/b3/B3LowerMacros.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Lowers high-level operations that it's easier to deal with once they are broken up. Currently
+// this includes Switch and ChillDiv.
+
+bool lowerMacros(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3LowerMacrosAfterOptimizations.cpp b/b3/B3LowerMacrosAfterOptimizations.cpp
new file mode 100644
index 0000000..dbe158b
--- /dev/null
+++ b/b3/B3LowerMacrosAfterOptimizations.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3LowerMacrosAfterOptimizations.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3CCallValue.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstFloatValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class LowerMacros {
+public:
+    LowerMacros(Procedure& proc)
+        : m_proc(proc)
+        , m_blockInsertionSet(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        for (BasicBlock* block : m_proc) {
+            m_block = block;
+            processCurrentBlock();
+        }
+        m_changed |= m_blockInsertionSet.execute();
+        if (m_changed) {
+            m_proc.resetReachability();
+            m_proc.invalidateCFG();
+        }
+        return m_changed;
+    }
+    
+private:
+    void processCurrentBlock()
+    {
+        for (m_index = 0; m_index < m_block->size(); ++m_index) {
+            m_value = m_block->at(m_index);
+            m_origin = m_value->origin();
+            switch (m_value->opcode()) {
+            case Abs: {
+                // ARM supports this instruction natively.
+                if (isARM64())
+                    break;
+
+                Value* mask = nullptr;
+                if (m_value->type() == Double)
+                    mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, bitwise_cast<double>(~(1ll << 63)));
+                else if (m_value->type() == Float)
+                    mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, bitwise_cast<float>(~(1 << 31)));
+                else
+                    RELEASE_ASSERT_NOT_REACHED();
+                Value* result = m_insertionSet.insert<Value>(m_index, BitAnd, m_origin, m_value->child(0), mask);
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+            case Ceil: {
+                if (MacroAssembler::supportsFloatingPointRounding())
+                    break;
+
+                Value* functionAddress = nullptr;
+                if (m_value->type() == Double) {
+                    double (*ceilDouble)(double) = ceil;
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, ceilDouble);
+                } else if (m_value->type() == Float)
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, ceilf);
+                else
+                    RELEASE_ASSERT_NOT_REACHED();
+
+                Value* result = m_insertionSet.insert<CCallValue>(m_index,
+                    m_value->type(),
+                    m_origin,
+                    Effects::none(),
+                    functionAddress,
+                    m_value->child(0));
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+            case Floor: {
+                if (MacroAssembler::supportsFloatingPointRounding())
+                    break;
+
+                Value* functionAddress = nullptr;
+                if (m_value->type() == Double) {
+                    double (*floorDouble)(double) = floor;
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, floorDouble);
+                } else if (m_value->type() == Float)
+                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, floorf);
+                else
+                    RELEASE_ASSERT_NOT_REACHED();
+
+                Value* result = m_insertionSet.insert<CCallValue>(m_index,
+                    m_value->type(),
+                    m_origin,
+                    Effects::none(),
+                    functionAddress,
+                    m_value->child(0));
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+            case Neg: {
+                if (!isFloat(m_value->type()))
+                    break;
+                
+                // X86 is odd in that it requires this.
+                if (!isX86())
+                    break;
+
+                Value* mask = nullptr;
+                if (m_value->type() == Double)
+                    mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, -0.0);
+                else {
+                    RELEASE_ASSERT(m_value->type() == Float);
+                    mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, -0.0f);
+                }
+
+                Value* result = m_insertionSet.insert<Value>(
+                    m_index, BitXor, m_origin, m_value->child(0), mask);
+                m_value->replaceWithIdentity(result);
+                break;
+            }
+
+            case RotL: {
+                // ARM64 doesn't have a rotate left.
+                if (isARM64()) {
+                    Value* newShift = m_insertionSet.insert<Value>(m_index, Neg, m_value->origin(), m_value->child(1));
+                    Value* rotate = m_insertionSet.insert<Value>(m_index, RotR, m_value->origin(), m_value->child(0), newShift);
+                    m_value->replaceWithIdentity(rotate);
+                    break;
+                }
+                break;
+            }
+            default:
+                break;
+            }
+        }
+        m_insertionSet.execute(m_block);
+    }
+    
+    Procedure& m_proc;
+    BlockInsertionSet m_blockInsertionSet;
+    InsertionSet m_insertionSet;
+    BasicBlock* m_block;
+    unsigned m_index;
+    Value* m_value;
+    Origin m_origin;
+    bool m_changed { false };
+};
+
+bool lowerMacrosImpl(Procedure& proc)
+{
+    LowerMacros lowerMacros(proc);
+    return lowerMacros.run();
+}
+
+} // anonymous namespace
+
+bool lowerMacrosAfterOptimizations(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "lowerMacrosAfterOptimizations");
+    bool result = lowerMacrosImpl(proc);
+    if (shouldValidateIR())
+        RELEASE_ASSERT(!lowerMacrosImpl(proc));
+    return result;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3LowerMacrosAfterOptimizations.h b/b3/B3LowerMacrosAfterOptimizations.h
new file mode 100644
index 0000000..f7b6536
--- /dev/null
+++ b/b3/B3LowerMacrosAfterOptimizations.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Lowers certain high-level opcodes to lower-level opcodes to help code generation.
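+// Currently this covers Abs, Ceil, Floor, floating-point Neg, and RotL on targets that lack a
+// native instruction for them.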
+
+bool lowerMacrosAfterOptimizations(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3LowerToAir.cpp b/b3/B3LowerToAir.cpp
new file mode 100644
index 0000000..1b2585f
--- /dev/null
+++ b/b3/B3LowerToAir.cpp
@@ -0,0 +1,2899 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3LowerToAir.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirStackSlot.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BlockWorklist.h"
+#include "B3CCallValue.h"
+#include "B3CheckSpecial.h"
+#include "B3Commutativity.h"
+#include "B3Dominators.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3PatchpointSpecial.h"
+#include "B3PatchpointValue.h"
+#include "B3PhaseScope.h"
+#include "B3PhiChildren.h"
+#include "B3Procedure.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "B3WasmAddressValue.h"
+#include 
+#include 
+#include 
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+namespace {
+
+const bool verbose = false;
+
+class LowerToAir {
+public:
+    LowerToAir(Procedure& procedure)
+        : m_valueToTmp(procedure.values().size())
+        , m_phiToTmp(procedure.values().size())
+        , m_blockToBlock(procedure.size())
+        , m_useCounts(procedure)
+        , m_phiChildren(procedure)
+        , m_dominators(procedure.dominators())
+        , m_procedure(procedure)
+        , m_code(procedure.code())
+    {
+    }
+
+    void run()
+    {
+        for (B3::BasicBlock* block : m_procedure)
+            m_blockToBlock[block] = m_code.addBlock(block->frequency());
+        
+        for (Value* value : m_procedure.values()) {
+            switch (value->opcode()) {
+            case Phi: {
+                m_phiToTmp[value] = m_code.newTmp(Arg::typeForB3Type(value->type()));
+                if (verbose)
+                    dataLog("Phi tmp for ", *value, ": ", m_phiToTmp[value], "\n");
+                break;
+            }
+            default:
+                break;
+            }
+        }
+
+        for (B3::StackSlot* stack : m_procedure.stackSlots())
+            m_stackToStack.add(stack, m_code.addStackSlot(stack));
+        for (Variable* variable : m_procedure.variables())
+            m_variableToTmp.add(variable, m_code.newTmp(Arg::typeForB3Type(variable->type())));
+
+        // Figure out which blocks are not rare.
+        m_fastWorklist.push(m_procedure[0]);
+        while (B3::BasicBlock* block = m_fastWorklist.pop()) {
+            for (B3::FrequentedBlock& successor : block->successors()) {
+                if (!successor.isRare())
+                    m_fastWorklist.push(successor.block());
+            }
+        }
+
+        m_procedure.resetValueOwners(); // Used by crossesInterference().
+
+        // Lower defs before uses on a global level. This is a good heuristic to lock down a
+        // hoisted address expression before we duplicate it back into the loop.
+        for (B3::BasicBlock* block : m_procedure.blocksInPreOrder()) {
+            m_block = block;
+            // Reset some state.
+            m_insts.resize(0);
+
+            m_isRare = !m_fastWorklist.saw(block);
+
+            if (verbose)
+                dataLog("Lowering Block ", *block, ":\n");
+            
+            // Process blocks in reverse order so we see uses before defs. That's what allows us
+            // to match patterns effectively.
+            for (unsigned i = block->size(); i--;) {
+                m_index = i;
+                m_value = block->at(i);
+                if (m_locked.contains(m_value))
+                    continue;
+                m_insts.append(Vector<Inst>());
+                if (verbose)
+                    dataLog("Lowering ", deepDump(m_procedure, m_value), ":\n");
+                lower();
+                if (verbose) {
+                    for (Inst& inst : m_insts.last())
+                        dataLog("    ", inst, "\n");
+                }
+            }
+
+            // Now append the instructions. m_insts contains them in reverse order, so we process
+            // it in reverse.
+            for (unsigned i = m_insts.size(); i--;) {
+                for (Inst& inst : m_insts[i])
+                    m_blockToBlock[block]->appendInst(WTFMove(inst));
+            }
+
+            // Make sure that the successors are set up correctly.
+            for (B3::FrequentedBlock successor : block->successors()) {
+                m_blockToBlock[block]->successors().append(
+                    Air::FrequentedBlock(m_blockToBlock[successor.block()], successor.frequency()));
+            }
+        }
+
+        Air::InsertionSet insertionSet(m_code);
+        for (Inst& inst : m_prologue)
+            insertionSet.insertInst(0, WTFMove(inst));
+        insertionSet.execute(m_code[0]);
+    }
+
+private:
+    bool shouldCopyPropagate(Value* value)
+    {
+        switch (value->opcode()) {
+        case Trunc:
+        case Identity:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    class ArgPromise {
+        WTF_MAKE_NONCOPYABLE(ArgPromise);
+    public:
+        ArgPromise() { }
+
+        ArgPromise(const Arg& arg, Value* valueToLock = nullptr)
+            : m_arg(arg)
+            , m_value(valueToLock)
+        {
+        }
+        
+        void swap(ArgPromise& other)
+        {
+            std::swap(m_arg, other.m_arg);
+            std::swap(m_value, other.m_value);
+            std::swap(m_wasConsumed, other.m_wasConsumed);
+            std::swap(m_wasWrapped, other.m_wasWrapped);
+            std::swap(m_traps, other.m_traps);
+        }
+        
+        ArgPromise(ArgPromise&& other)
+        {
+            swap(other);
+        }
+        
+        ArgPromise& operator=(ArgPromise&& other)
+        {
+            swap(other);
+            return *this;
+        }
+        
+        ~ArgPromise()
+        {
+            if (m_wasConsumed)
+                RELEASE_ASSERT(m_wasWrapped);
+        }
+        
+        void setTraps(bool value)
+        {
+            m_traps = value;
+        }
+
+        static ArgPromise tmp(Value* value)
+        {
+            ArgPromise result;
+            result.m_value = value;
+            return result;
+        }
+
+        explicit operator bool() const { return m_arg || m_value; }
+
+        Arg::Kind kind() const
+        {
+            if (!m_arg && m_value)
+                return Arg::Tmp;
+            return m_arg.kind();
+        }
+
+        const Arg& peek() const
+        {
+            return m_arg;
+        }
+
+        Arg consume(LowerToAir& lower)
+        {
+            m_wasConsumed = true;
+            if (!m_arg && m_value)
+                return lower.tmp(m_value);
+            if (m_value)
+                lower.commitInternal(m_value);
+            return m_arg;
+        }
+        
+        template<typename... Args>
+        Inst inst(Args&&... args)
+        {
+            Inst result(std::forward<Args>(args)...);
+            result.kind.traps |= m_traps;
+            m_wasWrapped = true;
+            return result;
+        }
+        
+    private:
+        // Three forms:
+        // Everything null: invalid.
+        // Arg non-null, value null: just use the arg, nothing special.
+        // Arg null, value non-null: it's a tmp, pin it when necessary.
+        // Arg non-null, value non-null: use the arg, lock the value.
+        Arg m_arg;
+        Value* m_value { nullptr };
+        bool m_wasConsumed { false };
+        bool m_wasWrapped { false };
+        bool m_traps { false };
+    };
+
+    // Consider using tmpPromise() in cases where you aren't sure that you want to pin the value yet.
+    // Here are three canonical ways of using tmp() and tmpPromise():
+    //
+    // Idiom #1: You know that you want a tmp() and you know that it will be valid for the
+    // instruction you're emitting.
+    //
+    //     append(Foo, tmp(bar));
+    //
+    // Idiom #2: You don't know if you want to use a tmp() because you haven't determined if the
+    // instruction will accept it, so you query first. Note that the call to tmp() happens only after
+    // you are sure that you will use it.
+    //
+    //     if (isValidForm(Foo, Arg::Tmp))
+    //         append(Foo, tmp(bar))
+    //
+    // Idiom #3: Same as Idiom #2, but using tmpPromise. Notice that this calls consume() only after
+    // it's sure it will use the tmp. That's deliberate. Also note that you're required to pass any
+    // Inst you create with consumed promises through that promise's inst() function.
+    //
+    //     ArgPromise promise = tmpPromise(bar);
+    //     if (isValidForm(Foo, promise.kind()))
+    //         append(promise.inst(Foo, promise.consume(*this)))
+    //
+    // In both idiom #2 and idiom #3, we don't pin the value to a temporary except when we actually
+    // emit the instruction. Both tmp() and tmpPromise().consume(*this) will pin it. Pinning means
+    // that we will henceforth require that the value of 'bar' is generated as a separate
+    // instruction. We don't want to pin the value to a temporary if we might change our minds, and
+    // pass an address operand representing 'bar' to Foo instead.
+    //
+    // Because tmp() pins, the following is not an idiom you should use:
+    //
+    //     Tmp tmp = this->tmp(bar);
+    //     if (isValidForm(Foo, tmp.kind()))
+    //         append(Foo, tmp);
+    //
+    // That's because if isValidForm() returns false, you will have already pinned the 'bar' to a
+    // temporary. You might later want to try to do something like loadPromise(), and that will fail.
+    // This arises in operations that have both a Addr,Tmp and Tmp,Addr forms. The following code
+    // seems right, but will actually fail to ever match the Tmp,Addr form because by then, the right
+    // value is already pinned.
+    //
+    //     auto tryThings = [this] (const Arg& left, const Arg& right) {
+    //         if (isValidForm(Foo, left.kind(), right.kind()))
+    //             return Inst(Foo, m_value, left, right);
+    //         return Inst();
+    //     };
+    //     if (Inst result = tryThings(loadAddr(left), tmp(right)))
+    //         return result;
+    //     if (Inst result = tryThings(tmp(left), loadAddr(right))) // this never succeeds.
+    //         return result;
+    //     return Inst(Foo, m_value, tmp(left), tmp(right));
+    //
+    // If you imagine that loadAddr(value) is just loadPromise(value).consume(*this), then this code
+    // will run correctly - it will generate OK code - but the second form is never matched.
+    // loadAddr(right) will never succeed because it will observe that 'right' is already pinned.
+    // Of course, it's exactly because of the risky nature of such code that we don't have a
+    // loadAddr() helper and require you to balance ArgPromise's in code like this. Such code will
+    // work fine if written as:
+    //
+    //     auto tryThings = [this] (ArgPromise& left, ArgPromise& right) {
+    //         if (isValidForm(Foo, left.kind(), right.kind()))
+    //             return left.inst(right.inst(Foo, m_value, left.consume(*this), right.consume(*this)));
+    //         return Inst();
+    //     };
+    //     if (Inst result = tryThings(loadPromise(left), tmpPromise(right)))
+    //         return result;
+    //     if (Inst result = tryThings(tmpPromise(left), loadPromise(right)))
+    //         return result;
+    //     return Inst(Foo, m_value, tmp(left), tmp(right));
+    //
+    // Notice that we did use tmp in the fall-back case at the end, because by then, we know for sure
+    // that we want a tmp. But using tmpPromise in the tryThings() calls ensures that doing so
+    // doesn't prevent us from trying loadPromise on the same value.
+    Tmp tmp(Value* value)
+    {
+        Tmp& tmp = m_valueToTmp[value];
+        if (!tmp) {
+            while (shouldCopyPropagate(value))
+                value = value->child(0);
+
+            if (value->opcode() == FramePointer)
+                return Tmp(GPRInfo::callFrameRegister);
+
+            Tmp& realTmp = m_valueToTmp[value];
+            if (!realTmp) {
+                realTmp = m_code.newTmp(Arg::typeForB3Type(value->type()));
+                if (m_procedure.isFastConstant(value->key()))
+                    m_code.addFastTmp(realTmp);
+                if (verbose)
+                    dataLog("Tmp for ", *value, ": ", realTmp, "\n");
+            }
+            tmp = realTmp;
+        }
+        return tmp;
+    }
+
+    ArgPromise tmpPromise(Value* value)
+    {
+        return ArgPromise::tmp(value);
+    }
+
+    bool canBeInternal(Value* value)
+    {
+        // If one of the internal things has already been computed, then we don't want to cause
+        // it to be recomputed again.
+        if (m_valueToTmp[value])
+            return false;
+        
+        // We require internals to have only one use - us. It's not clear if this should be numUses() or
+        // numUsingInstructions(). Ideally, it would be numUsingInstructions(), except that it's not clear
+        // if we'd actually do the right thing when matching over such a DAG pattern. For now, it simply
+        // doesn't matter because we don't implement patterns that would trigger this.
+        if (m_useCounts.numUses(value) != 1)
+            return false;
+
+        return true;
+    }
+
+    // If you ask canBeInternal() and then construct something from that, and you commit to emitting
+    // that code, then you must commitInternal() on that value. This is tricky, and you only need to
+    // do it if you're pattern matching by hand rather than using the patterns language. Long story
+    // short, you should avoid this by using the pattern matcher to match patterns.
+    void commitInternal(Value* value)
+    {
+        if (value)
+            m_locked.add(value);
+    }
+
+    bool crossesInterference(Value* value)
+    {
+        // If it's in a foreign block, then be conservative. We could handle this if we were
+        // willing to do heavier analysis. For example, if we had liveness, then we could label
+        // values as "crossing interference" if they interfere with anything that they are live
+        // across. But, it's not clear how useful this would be.
+        if (value->owner != m_value->owner)
+            return true;
+
+        Effects effects = value->effects();
+
+        for (unsigned i = m_index; i--;) {
+            Value* otherValue = m_block->at(i);
+            if (otherValue == value)
+                return false;
+            if (effects.interferes(otherValue->effects()))
+                return true;
+        }
+
+        ASSERT_NOT_REACHED();
+        return true;
+    }
+    
+    std::optional<unsigned> scaleForShl(Value* shl, int32_t offset, std::optional<Arg::Width> width = std::nullopt)
+    {
+        if (shl->opcode() != Shl)
+            return std::nullopt;
+        if (!shl->child(1)->hasInt32())
+            return std::nullopt;
+        unsigned logScale = shl->child(1)->asInt32();
+        if (shl->type() == Int32)
+            logScale &= 31;
+        else
+            logScale &= 63;
+        // Use 64-bit math to perform the shift so that <<32 does the right thing, but then switch
+        // to signed since that's what all of our APIs want.
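+        // For example, a shift amount of 3 yields a scale of 8, which is then validated against
+        // Arg::isValidIndexForm below.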
+        int64_t bigScale = static_cast<int64_t>(1) << static_cast<int64_t>(logScale);
+        if (!isRepresentableAs<int32_t>(bigScale))
+            return std::nullopt;
+        unsigned scale = static_cast<unsigned>(bigScale);
+        if (!Arg::isValidIndexForm(scale, offset, width))
+            return std::nullopt;
+        return scale;
+    }
+
+    // This turns the given operand into an address.
+    Arg effectiveAddr(Value* address, int32_t offset, Arg::Width width)
+    {
+        ASSERT(Arg::isValidAddrForm(offset, width));
+
+        auto fallback = [&] () -> Arg {
+            return Arg::addr(tmp(address), offset);
+        };
+        
+        static const unsigned lotsOfUses = 10; // This is arbitrary and we should tune it eventually.
+
+        // Only match if the address value isn't used in some large number of places.
+        if (m_useCounts.numUses(address) > lotsOfUses)
+            return fallback();
+        
+        switch (address->opcode()) {
+        case Add: {
+            Value* left = address->child(0);
+            Value* right = address->child(1);
+
+            auto tryIndex = [&] (Value* index, Value* base) -> Arg {
+                std::optional<unsigned> scale = scaleForShl(index, offset, width);
+                if (!scale)
+                    return Arg();
+                if (m_locked.contains(index->child(0)) || m_locked.contains(base))
+                    return Arg();
+                return Arg::index(tmp(base), tmp(index->child(0)), *scale, offset);
+            };
+
+            if (Arg result = tryIndex(left, right))
+                return result;
+            if (Arg result = tryIndex(right, left))
+                return result;
+
+            if (m_locked.contains(left) || m_locked.contains(right)
+                || !Arg::isValidIndexForm(1, offset, width))
+                return fallback();
+            
+            return Arg::index(tmp(left), tmp(right), 1, offset);
+        }
+
+        case Shl: {
+            Value* left = address->child(0);
+
+            // We'll never see child(1)->isInt32(0), since that would have been reduced. If the shift
+            // amount is greater than 1, then there isn't really anything smart that we could do here.
+            // We avoid using baseless indexes because their encoding isn't particularly efficient.
+            if (m_locked.contains(left) || !address->child(1)->isInt32(1)
+                || !Arg::isValidIndexForm(1, offset, width))
+                return fallback();
+
+            return Arg::index(tmp(left), tmp(left), 1, offset);
+        }
+
+        case FramePointer:
+            return Arg::addr(Tmp(GPRInfo::callFrameRegister), offset);
+
+        case SlotBase:
+            return Arg::stack(m_stackToStack.get(address->as<SlotBaseValue>()->slot()), offset);
+
+        case WasmAddress: {
+            WasmAddressValue* wasmAddress = address->as<WasmAddressValue>();
+            Value* pointer = wasmAddress->child(0);
+            ASSERT(Arg::isValidIndexForm(1, offset, width));
+            if (m_locked.contains(pointer))
+                return fallback();
+
+            // FIXME: We should support ARM64 LDR 32-bit addressing, which will
+            // allow us to fuse a Shl ptr, 2 into the address. Additionally, and
+            // perhaps more importantly, it would allow us to avoid a truncating
+            // move. See: https://bugs.webkit.org/show_bug.cgi?id=163465
+
+            return Arg::index(Tmp(wasmAddress->pinnedGPR()), tmp(pointer), 1, offset);
+        }
+
+        default:
+            return fallback();
+        }
+    }
+
+    // This gives you the address of the given Load or Store. If it's not a Load or Store, then
+    // it returns Arg().
+    Arg addr(Value* memoryValue)
+    {
+        MemoryValue* value = memoryValue->as<MemoryValue>();
+        if (!value)
+            return Arg();
+
+        int32_t offset = value->offset();
+        Arg::Width width = Arg::widthForBytes(value->accessByteSize());
+
+        Arg result = effectiveAddr(value->lastChild(), offset, width);
+        ASSERT(result.isValidForm(width));
+
+        return result;
+    }
+    
+    template<typename... Args>
+    Inst trappingInst(bool traps, Args&&... args)
+    {
+        Inst result(std::forward<Args>(args)...);
+        result.kind.traps |= traps;
+        return result;
+    }
+    
+    template<typename... Args>
+    Inst trappingInst(Value* value, Args&&... args)
+    {
+        return trappingInst(value->traps(), std::forward<Args>(args)...);
+    }
+    
+    ArgPromise loadPromiseAnyOpcode(Value* loadValue)
+    {
+        if (!canBeInternal(loadValue))
+            return Arg();
+        if (crossesInterference(loadValue))
+            return Arg();
+        ArgPromise result(addr(loadValue), loadValue);
+        if (loadValue->traps())
+            result.setTraps(true);
+        return result;
+    }
+
+    ArgPromise loadPromise(Value* loadValue, B3::Opcode loadOpcode)
+    {
+        if (loadValue->opcode() != loadOpcode)
+            return Arg();
+        return loadPromiseAnyOpcode(loadValue);
+    }
+
+    ArgPromise loadPromise(Value* loadValue)
+    {
+        return loadPromise(loadValue, Load);
+    }
+
+    Arg imm(int64_t intValue)
+    {
+        if (Arg::isValidImmForm(intValue))
+            return Arg::imm(intValue);
+        return Arg();
+    }
+
+    Arg imm(Value* value)
+    {
+        if (value->hasInt())
+            return imm(value->asInt());
+        return Arg();
+    }
+
+    Arg bitImm(Value* value)
+    {
+        if (value->hasInt()) {
+            int64_t intValue = value->asInt();
+            if (Arg::isValidBitImmForm(intValue))
+                return Arg::bitImm(intValue);
+        }
+        return Arg();
+    }
+
+    Arg bitImm64(Value* value)
+    {
+        if (value->hasInt()) {
+            int64_t intValue = value->asInt();
+            if (Arg::isValidBitImm64Form(intValue))
+                return Arg::bitImm64(intValue);
+        }
+        return Arg();
+    }
+
+    Arg immOrTmp(Value* value)
+    {
+        if (Arg result = imm(value))
+            return result;
+        return tmp(value);
+    }
+
+    // By convention, we use Oops to mean "I don't know".
+    Air::Opcode tryOpcodeForType(
+        Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
+    {
+        Air::Opcode opcode;
+        switch (type) {
+        case Int32:
+            opcode = opcode32;
+            break;
+        case Int64:
+            opcode = opcode64;
+            break;
+        case Float:
+            opcode = opcodeFloat;
+            break;
+        case Double:
+            opcode = opcodeDouble;
+            break;
+        default:
+            opcode = Air::Oops;
+            break;
+        }
+
+        return opcode;
+    }
+
+    Air::Opcode tryOpcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
+    {
+        return tryOpcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
+    }
+
+    Air::Opcode opcodeForType(
+        Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type)
+    {
+        Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, type);
+        RELEASE_ASSERT(opcode != Air::Oops);
+        return opcode;
+    }
+
+    Air::Opcode opcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
+    {
+        return opcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble = Air::Oops, Air::Opcode opcodeFloat = Air::Oops>
+    void appendUnOp(Value* value)
+    {
+        Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, value->type());
+        
+        Tmp result = tmp(m_value);
+
+        // Two operand forms like:
+        //     Op a, b
+        // mean something like:
+        //     b = Op a
+
+        ArgPromise addr = loadPromise(value);
+        if (isValidForm(opcode, addr.kind(), Arg::Tmp)) {
+            append(addr.inst(opcode, m_value, addr.consume(*this), result));
+            return;
+        }
+
+        if (isValidForm(opcode, Arg::Tmp, Arg::Tmp)) {
+            append(opcode, tmp(value), result);
+            return;
+        }
+
+        ASSERT(value->type() == m_value->type());
+        append(relaxedMoveForType(m_value->type()), tmp(value), result);
+        append(opcode, result);
+    }
+
+    // Call this method when doing two-operand lowering of a commutative operation. You have a choice of
+    // which incoming Value is moved into the result. This will select which one is likely to be most
+    // profitable to use as the result. Doing the right thing can have big performance consequences in tight
+    // kernels.
+    bool preferRightForResult(Value* left, Value* right)
+    {
+        // The default is to move left into result, because that's required for non-commutative instructions.
+        // The value that we want to move into result position is the one that dies here. So, if we're
+        // compiling a commutative operation and we know that actually right is the one that dies right here,
+        // then we can flip things around to help coalescing, which then kills the move instruction.
+        //
+        // But it's more complicated:
+        // - Used-once is a bad estimate of whether the variable dies here.
+        // - A child might be a candidate for coalescing with this value.
+        //
+        // Currently, we have machinery in place to recognize super obvious forms of the latter issue.
+        
+        // We recognize when a child is a Phi that has this value as one of its children. We're very
+        // conservative about this; for example we don't even consider transitive Phi children.
+        bool leftIsPhiWithThis = m_phiChildren[left].transitivelyUses(m_value);
+        bool rightIsPhiWithThis = m_phiChildren[right].transitivelyUses(m_value);
+
+        if (leftIsPhiWithThis != rightIsPhiWithThis)
+            return rightIsPhiWithThis;
+
+        if (m_useCounts.numUsingInstructions(right) != 1)
+            return false;
+        
+        if (m_useCounts.numUsingInstructions(left) != 1)
+            return true;
+
+        // The use count might be 1 if the variable is live around a loop. We can guarantee that we
+        // pick the variable that is least likely to suffer this problem if we pick the one that
+        // is closest to us in an idom walk. By convention, we slightly bias this in favor of
+        // returning true.
+
+        // We cannot prefer right if right is further away in an idom walk.
+        if (m_dominators.strictlyDominates(right->owner, left->owner))
+            return false;
+
+        return true;
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Commutativity commutativity = NotCommutative>
+    void appendBinOp(Value* left, Value* right)
+    {
+        Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, left->type());
+        
+        Tmp result = tmp(m_value);
+        
+        // Three-operand forms like:
+        //     Op a, b, c
+        // mean something like:
+        //     c = a Op b
+
+        if (isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
+            if (commutativity == Commutative) {
+                if (imm(right)) {
+                    append(opcode, imm(right), tmp(left), result);
+                    return;
+                }
+            } else {
+                // A non-commutative operation could have an immediate in left.
+                if (imm(left)) {
+                    append(opcode, imm(left), tmp(right), result);
+                    return;
+                }
+            }
+        }
+
+        if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) {
+            if (commutativity == Commutative) {
+                if (Arg rightArg = bitImm(right)) {
+                    append(opcode, rightArg, tmp(left), result);
+                    return;
+                }
+            } else {
+                // A non-commutative operation could have an immediate in left.
+                if (Arg leftArg = bitImm(left)) {
+                    append(opcode, leftArg, tmp(right), result);
+                    return;
+                }
+            }
+        }
+
+        if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) {
+            if (commutativity == Commutative) {
+                if (Arg rightArg = bitImm64(right)) {
+                    append(opcode, rightArg, tmp(left), result);
+                    return;
+                }
+            } else {
+                // A non-commutative operation could have an immediate in left.
+                if (Arg leftArg = bitImm64(left)) {
+                    append(opcode, leftArg, tmp(right), result);
+                    return;
+                }
+            }
+        }
+
+        if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+            append(opcode, tmp(left), imm(right), result);
+            return;
+        }
+
+        // Note that no extant architecture has a three-operand form of binary operations that also
+        // load from memory. If such an abomination did exist, we would handle it somewhere around
+        // here.
+
+        // Two-operand forms like:
+        //     Op a, b
+        // mean something like:
+        //     b = b Op a
+
+        // At this point, we prefer versions of the operation that have a fused load or an immediate
+        // over three operand forms.
+
+        if (left != right) {
+            ArgPromise leftAddr = loadPromise(left);
+            if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp, Arg::Tmp)) {
+                append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), tmp(right), result));
+                return;
+            }
+
+            if (commutativity == Commutative) {
+                if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp)) {
+                    append(relaxedMoveForType(m_value->type()), tmp(right), result);
+                    append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), result));
+                    return;
+                }
+            }
+
+            ArgPromise rightAddr = loadPromise(right);
+            if (isValidForm(opcode, Arg::Tmp, rightAddr.kind(), Arg::Tmp)) {
+                append(rightAddr.inst(opcode, m_value, tmp(left), rightAddr.consume(*this), result));
+                return;
+            }
+
+            if (commutativity == Commutative) {
+                if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp, Arg::Tmp)) {
+                    append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), tmp(left), result));
+                    return;
+                }
+            }
+
+            if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp)) {
+                append(relaxedMoveForType(m_value->type()), tmp(left), result);
+                append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), result));
+                return;
+            }
+        }
+
+        if (imm(right) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
+            append(relaxedMoveForType(m_value->type()), tmp(left), result);
+            append(opcode, imm(right), result);
+            return;
+        }
+
+        if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+            append(opcode, tmp(left), tmp(right), result);
+            return;
+        }
+
+        if (commutativity == Commutative && preferRightForResult(left, right)) {
+            append(relaxedMoveForType(m_value->type()), tmp(right), result);
+            append(opcode, tmp(left), result);
+            return;
+        }
+        
+        append(relaxedMoveForType(m_value->type()), tmp(left), result);
+        append(opcode, tmp(right), result);
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
+    void appendBinOp(Value* left, Value* right)
+    {
+        appendBinOp<opcode32, opcode64, Air::Oops, Air::Oops, commutativity>(left, right);
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64>
+    void appendShift(Value* value, Value* amount)
+    {
+        Air::Opcode opcode = opcodeForType(opcode32, opcode64, value->type());
+        
+        if (imm(amount)) {
+            if (isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+                append(opcode, tmp(value), imm(amount), tmp(m_value));
+                return;
+            }
+            if (isValidForm(opcode, Arg::Imm, Arg::Tmp)) {
+                append(Move, tmp(value), tmp(m_value));
+                append(opcode, imm(amount), tmp(m_value));
+                return;
+            }
+        }
+
+        if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+            append(opcode, tmp(value), tmp(amount), tmp(m_value));
+            return;
+        }
+
+#if CPU(X86) || CPU(X86_64)
+        append(Move, tmp(value), tmp(m_value));
+        append(Move, tmp(amount), Tmp(X86Registers::ecx));
+        append(opcode, Tmp(X86Registers::ecx), tmp(m_value));
+#endif
+    }
+
+    template<Air::Opcode opcode32, Air::Opcode opcode64>
+    bool tryAppendStoreUnOp(Value* value)
+    {
+        Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, value->type());
+        if (opcode == Air::Oops)
+            return false;
+        
+        Arg storeAddr = addr(m_value);
+        ASSERT(storeAddr);
+
+        ArgPromise loadPromise = this->loadPromise(value);
+        if (loadPromise.peek() != storeAddr)
+            return false;
+
+        if (!isValidForm(opcode, storeAddr.kind()))
+            return false;
+        
+        loadPromise.consume(*this);
+        append(trappingInst(m_value, loadPromise.inst(opcode, m_value, storeAddr)));
+        return true;
+    }
+
+    template<
+        Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative>
+    bool tryAppendStoreBinOp(Value* left, Value* right)
+    {
+        Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, left->type());
+        if (opcode == Air::Oops)
+            return false;
+        
+        Arg storeAddr = addr(m_value);
+        ASSERT(storeAddr);
+
+        auto getLoadPromise = [&] (Value* load) -> ArgPromise {
+            switch (m_value->opcode()) {
+            case B3::Store:
+                if (load->opcode() != B3::Load)
+                    return ArgPromise();
+                break;
+            case B3::Store8:
+                if (load->opcode() != B3::Load8Z && load->opcode() != B3::Load8S)
+                    return ArgPromise();
+                break;
+            case B3::Store16:
+                if (load->opcode() != B3::Load16Z && load->opcode() != B3::Load16S)
+                    return ArgPromise();
+                break;
+            default:
+                return ArgPromise();
+            }
+            return loadPromiseAnyOpcode(load);
+        };
+        
+        ArgPromise loadPromise;
+        Value* otherValue = nullptr;
+
+        loadPromise = getLoadPromise(left);
+        if (loadPromise.peek() == storeAddr)
+            otherValue = right;
+        else if (commutativity == Commutative) {
+            loadPromise = getLoadPromise(right);
+            if (loadPromise.peek() == storeAddr)
+                otherValue = left;
+        }
+
+        if (!otherValue)
+            return false;
+
+        if (isValidForm(opcode, Arg::Imm, storeAddr.kind()) && imm(otherValue)) {
+            loadPromise.consume(*this);
+            append(trappingInst(m_value, loadPromise.inst(opcode, m_value, imm(otherValue), storeAddr)));
+            return true;
+        }
+
+        if (!isValidForm(opcode, Arg::Tmp, storeAddr.kind()))
+            return false;
+
+        loadPromise.consume(*this);
+        append(trappingInst(m_value, loadPromise.inst(opcode, m_value, tmp(otherValue), storeAddr)));
+        return true;
+    }
+
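+    // Builds a store Inst, using an immediate source when the value has one and the target form
+    // accepts it, and a tmp source otherwise.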
+    Inst createStore(Air::Opcode move, Value* value, const Arg& dest)
+    {
+        if (imm(value) && isValidForm(move, Arg::Imm, dest.kind()))
+            return Inst(move, m_value, imm(value), dest);
+
+        return Inst(move, m_value, tmp(value), dest);
+    }
+
+    Inst createStore(Value* value, const Arg& dest)
+    {
+        Air::Opcode moveOpcode = moveForType(value->type());
+        return createStore(moveOpcode, value, dest);
+    }
+
+    template<typename... Args>
+    void appendStore(Args&&... args)
+    {
+        append(trappingInst(m_value, createStore(std::forward<Args>(args)...)));
+    }
+
+    Air::Opcode moveForType(Type type)
+    {
+        switch (type) {
+        case Int32:
+            return Move32;
+        case Int64:
+            RELEASE_ASSERT(is64Bit());
+            return Move;
+        case Float:
+            return MoveFloat;
+        case Double:
+            return MoveDouble;
+        case Void:
+            break;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return Air::Oops;
+    }
+
+    Air::Opcode relaxedMoveForType(Type type)
+    {
+        switch (type) {
+        case Int32:
+        case Int64:
+            // For Int32, we could return Move or Move32. It's a trade-off.
+            //
+            // Move32: Using Move32 guarantees that we use the narrower move, but in cases where the
+            //     register allocator can't prove that the variables involved are 32-bit, this will
+            //     disable coalescing.
+            //
+            // Move: Using Move guarantees that the register allocator can coalesce normally, but in
+            //     cases where it can't prove that the variables are 32-bit and it doesn't coalesce,
+            //     this will force us to use a full 64-bit Move instead of the slightly cheaper
+            //     32-bit Move32.
+            //
+            // Coalescing is a lot more profitable than turning Move into Move32. So, it's better to
+            // use Move here because in cases where the register allocator cannot prove that
+            // everything is 32-bit, we still get coalescing.
+            return Move;
+        case Float:
+            // MoveFloat is always coalescable and we never convert MoveDouble to MoveFloat, so we
+            // should use MoveFloat when we know that the temporaries involved are 32-bit.
+            return MoveFloat;
+        case Double:
+            return MoveDouble;
+        case Void:
+            break;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return Air::Oops;
+    }
+
+    template<typename... Arguments>
+    void append(Air::Opcode opcode, Arguments&&... arguments)
+    {
+        m_insts.last().append(Inst(opcode, m_value, std::forward<Arguments>(arguments)...));
+    }
+    
+    void append(Inst&& inst)
+    {
+        m_insts.last().append(WTFMove(inst));
+    }
+    void append(const Inst& inst)
+    {
+        m_insts.last().append(inst);
+    }
+
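+    // Lazily creates a Special of type T, registers it with m_code, and caches it in the given
+    // field. ensureCheckSpecial additionally memoizes by CheckSpecial::Key so that structurally
+    // identical checks share a single Special.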
+    template<typename T, typename... Arguments>
+    T* ensureSpecial(T*& field, Arguments&&... arguments)
+    {
+        if (!field) {
+            field = static_cast<T*>(
+                m_code.addSpecial(std::make_unique<T>(std::forward<Arguments>(arguments)...)));
+        }
+        return field;
+    }
+
+    template<typename... Arguments>
+    CheckSpecial* ensureCheckSpecial(Arguments&&... arguments)
+    {
+        CheckSpecial::Key key(std::forward<Arguments>(arguments)...);
+        auto result = m_checkSpecials.add(key, nullptr);
+        return ensureSpecial(result.iterator->value, key);
+    }
+
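+    // Appends one Arg per constrained child of the stackmap, skipping the first numSkipped
+    // children. Constants become immediates where the rep allows it, register reps are satisfied
+    // with an explicit move into the required register, and stack arguments are stored to their
+    // call-argument slot.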
+    void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped)
+    {
+        for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) {
+            ConstrainedValue value = stackmap->constrainedChild(i);
+
+            Arg arg;
+            switch (value.rep().kind()) {
+            case ValueRep::WarmAny:
+            case ValueRep::ColdAny:
+            case ValueRep::LateColdAny:
+                if (imm(value.value()))
+                    arg = imm(value.value());
+                else if (value.value()->hasInt64())
+                    arg = Arg::bigImm(value.value()->asInt64());
+                else if (value.value()->hasDouble() && canBeInternal(value.value())) {
+                    commitInternal(value.value());
+                    arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble()));
+                } else
+                    arg = tmp(value.value());
+                break;
+            case ValueRep::SomeRegister:
+                arg = tmp(value.value());
+                break;
+            case ValueRep::LateRegister:
+            case ValueRep::Register:
+                stackmap->earlyClobbered().clear(value.rep().reg());
+                arg = Tmp(value.rep().reg());
+                append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), arg);
+                break;
+            case ValueRep::StackArgument:
+                arg = Arg::callArg(value.rep().offsetFromSP());
+                appendStore(value.value(), arg);
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            inst.args.append(arg);
+        }
+    }
+    
+    // Create an Inst to do the comparison specified by the given value.
+    template<typename CompareFunctor, typename TestFunctor, typename CompareDoubleFunctor, typename CompareFloatFunctor>
+    Inst createGenericCompare(
+        Value* value,
+        const CompareFunctor& compare, // Signature: (Arg::Width, Arg relCond, Arg, Arg) -> Inst
+        const TestFunctor& test, // Signature: (Arg::Width, Arg resCond, Arg, Arg) -> Inst
+        const CompareDoubleFunctor& compareDouble, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
+        const CompareFloatFunctor& compareFloat, // Signature: (Arg doubleCond, Arg, Arg) -> Inst
+        bool inverted = false)
+    {
+        // NOTE: This is totally happy to match comparisons that have already been computed elsewhere
+        // since on most architectures, the cost of branching on a previously computed comparison
+        // result is almost always higher than just doing another fused compare/branch. The only time
+        // it could be worse is if we have a binary comparison and both operands are variables (not
+        // constants), and we encounter register pressure. Even in this case, duplicating the compare
+        // so that we can fuse it to the branch will be more efficient most of the time, since
+        // register pressure is not *that* common. For this reason, this algorithm will always
+        // duplicate the comparison.
+        //
+        // However, we cannot duplicate loads. The canBeInternal() on a load will assume that we
+        // already validated canBeInternal() on all of the values that got us to the load. So, even
+        // if we are sharing a value, we still need to call canBeInternal() for the purpose of
+        // tracking whether we are still in good shape to fuse loads.
+        //
+        // We could even have a chain of compare values that we fuse, and any member of the chain
+        // could be shared. Once any of them are shared, then the shared one's transitive children
+        // cannot be locked (i.e. commitInternal()). But if none of them are shared, then we want to
+        // lock all of them because that's a prerequisite to fusing the loads so that the loads don't
+        // get duplicated. For example, we might have: 
+        //
+        //     @tmp1 = LessThan(@a, @b)
+        //     @tmp2 = Equal(@tmp1, 0)
+        //     Branch(@tmp2)
+        //
+        // If either @a or @b are loads, then we want to have locked @tmp1 and @tmp2 so that they
+        // don't emit the loads a second time. But if we had another use of @tmp2, then we cannot
+        // lock @tmp1 (or @a or @b) because then we'll get into trouble when the other values that
+        // try to share @tmp1 with us try to do their lowering.
+        //
+        // There's one more wrinkle. If we don't lock an internal value, then this internal value may
+        // have already separately locked its children. So, if we're not locking a value then we need
+        // to make sure that its children aren't locked. We encapsulate this in two ways:
+        //
+        // canCommitInternal: This variable tells us if the values that we've fused so far are
+        // locked. This means that we're not sharing any of them with anyone. This permits us to fuse
+        // loads. If it's false, then we cannot fuse loads and we also need to ensure that the
+        // children of any values we try to fuse-by-sharing are not already locked. You don't have to
+        // worry about the children locking thing if you use prepareToFuse() before trying to fuse a
+        // sharable value. But, you do need to guard any load fusion by checking if canCommitInternal
+        // is true.
+        //
+        // FusionResult prepareToFuse(value): Call this when you think that you would like to fuse
+        // some value and that value is not a load. It will automatically handle the shared-or-locked
+        // issues and it will clear canCommitInternal if necessary. This will return CannotFuse
+        // (which acts like false) if the value cannot be locked and its children are locked. That's
+        // rare, but you just need to make sure that you do smart things when this happens (i.e. just
+        // use the value rather than trying to fuse it). After you call prepareToFuse(), you can
+        // still change your mind about whether you will actually fuse the value. If you do fuse it,
+        // you need to call commitFusion(value, fusionResult).
+        //
+        // commitFusion(value, fusionResult): Handles calling commitInternal(value) if fusionResult
+        // is FuseAndCommit.
+        
+        bool canCommitInternal = true;
+
+        enum FusionResult {
+            CannotFuse,
+            FuseAndCommit,
+            Fuse
+        };
+        auto prepareToFuse = [&] (Value* value) -> FusionResult {
+            if (value == m_value) {
+                // It's not actually internal. It's the root value. We're good to go.
+                return Fuse;
+            }
+
+            if (canCommitInternal && canBeInternal(value)) {
+                // We are the only users of this value. This also means that the value's children
+                // could not have been locked, since we have now proved that m_value dominates value
+                // in the data flow graph. The only other way to reach value is from a user of m_value. If
+                // value's children are shared with others, then they could not have been locked
+                // because their use count is greater than 1. If they are only used from value, then
+                // in order for value's children to be locked, value would also have to be locked,
+                // and we just proved that it wasn't.
+                return FuseAndCommit;
+            }
+
+            // We're going to try to share value with others. It's possible that some other basic
+            // block had already emitted code for value and then matched over its children and then
+            // locked them, in which case we just want to use value instead of duplicating it. So, we
+            // validate the children. Note that this only arises in linear chains like:
+            //
+            //     BB#1:
+            //         @1 = Foo(...)
+            //         @2 = Bar(@1)
+            //         Jump(#2)
+            //     BB#2:
+            //         @3 = Baz(@2)
+            //
+            // Notice how we could start by generating code for BB#1 and then decide to lock @1 when
+            // generating code for @2, if we have some way of fusing Bar and Foo into a single
+            // instruction. This is legal, since indeed @1 only has one user. But because @2 now
+            // has a tmp (i.e. @2 is pinned), canBeInternal(@2) will return false, which brings us
+            // here. In that case, we cannot match over @2 because then we'd hit a hazard if we end
+            // up deciding not to fuse Foo into the fused Baz/Bar.
+            //
+            // Happily, there are only two places where this kind of child validation happens:
+            // rules that admit sharing, namely this one and effectiveAddress().
+            //
+            // N.B. We could probably avoid the need to do value locking if we committed to a well
+            // chosen code generation order. For example, if we guaranteed that all of the users of
+            // a value get generated before that value, then there's no way for the lowering of @3 to
+            // see @1 locked. But we don't want to do that, since this is a greedy instruction
+            // selector and so we want to be able to play with order.
+            for (Value* child : value->children()) {
+                if (m_locked.contains(child))
+                    return CannotFuse;
+            }
+
+            // It's safe to share value, but since we're sharing, it means that we aren't locking it.
+            // If we don't lock it, then fusing loads is off limits and all of value's children will
+            // have to go through the sharing path as well.
+            canCommitInternal = false;
+            
+            return Fuse;
+        };
+
+        auto commitFusion = [&] (Value* value, FusionResult result) {
+            if (result == FuseAndCommit)
+                commitInternal(value);
+        };
+        
+        // Chew through any inversions. This loop isn't necessary for comparisons and branches, but
+        // we do need at least one iteration of it for Check.
+        for (;;) {
+            bool shouldInvert =
+                (value->opcode() == BitXor && value->child(1)->hasInt() && (value->child(1)->asInt() & 1) && value->child(0)->returnsBool())
+                || (value->opcode() == Equal && value->child(1)->isInt(0));
+            if (!shouldInvert)
+                break;
+
+            FusionResult fusionResult = prepareToFuse(value);
+            if (fusionResult == CannotFuse)
+                break;
+            commitFusion(value, fusionResult);
+            
+            value = value->child(0);
+            inverted = !inverted;
+        }
+
+        auto createRelCond = [&] (
+            MacroAssembler::RelationalCondition relationalCondition,
+            MacroAssembler::DoubleCondition doubleCondition) {
+            Arg relCond = Arg::relCond(relationalCondition).inverted(inverted);
+            Arg doubleCond = Arg::doubleCond(doubleCondition).inverted(inverted);
+            Value* left = value->child(0);
+            Value* right = value->child(1);
+
+            if (isInt(value->child(0)->type())) {
+                // FIXME: We wouldn't have to worry about leftImm if we canonicalized integer
+                // comparisons.
+                // https://bugs.webkit.org/show_bug.cgi?id=150958
+                
+                Arg leftImm = imm(left);
+                Arg rightImm = imm(right);
+
+                auto tryCompare = [&] (
+                    Arg::Width width, ArgPromise&& left, ArgPromise&& right) -> Inst {
+                    if (Inst result = compare(width, relCond, left, right))
+                        return result;
+                    if (Inst result = compare(width, relCond.flipped(), right, left))
+                        return result;
+                    return Inst();
+                };
+
+                auto tryCompareLoadImm = [&] (
+                    Arg::Width width, B3::Opcode loadOpcode, Arg::Signedness signedness) -> Inst {
+                    if (rightImm && rightImm.isRepresentableAs(width, signedness)) {
+                        if (Inst result = tryCompare(width, loadPromise(left, loadOpcode), rightImm)) {
+                            commitInternal(left);
+                            return result;
+                        }
+                    }
+                    if (leftImm && leftImm.isRepresentableAs(width, signedness)) {
+                        if (Inst result = tryCompare(width, leftImm, loadPromise(right, loadOpcode))) {
+                            commitInternal(right);
+                            return result;
+                        }
+                    }
+                    return Inst();
+                };
+
+                Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
+                
+                if (canCommitInternal) {
+                    // First handle compares that involve fewer bits than B3's type system supports.
+                    // This is pretty important. For example, we want this to be a single
+                    // instruction:
+                    //
+                    //     @1 = Load8S(...)
+                    //     @2 = Const32(...)
+                    //     @3 = LessThan(@1, @2)
+                    //     Branch(@3)
+                
+                    if (relCond.isSignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width8, Load8S, Arg::Signed))
+                            return result;
+                    }
+                
+                    if (relCond.isUnsignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width8, Load8Z, Arg::Unsigned))
+                            return result;
+                    }
+
+                    if (relCond.isSignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width16, Load16S, Arg::Signed))
+                            return result;
+                    }
+                
+                    if (relCond.isUnsignedCond()) {
+                        if (Inst result = tryCompareLoadImm(Arg::Width16, Load16Z, Arg::Unsigned))
+                            return result;
+                    }
+
+                    // Now handle compares that involve a load and an immediate.
+
+                    if (Inst result = tryCompareLoadImm(width, Load, Arg::Signed))
+                        return result;
+
+                    // Now handle compares that involve a load. It's not obvious that it's better to
+                    // handle this before the immediate cases or not. Probably doesn't matter.
+
+                    if (Inst result = tryCompare(width, loadPromise(left), tmpPromise(right))) {
+                        commitInternal(left);
+                        return result;
+                    }
+                
+                    if (Inst result = tryCompare(width, tmpPromise(left), loadPromise(right))) {
+                        commitInternal(right);
+                        return result;
+                    }
+                }
+
+                // Now handle compares that involve an immediate and a tmp.
+                
+                if (leftImm && leftImm.isRepresentableAs<int32_t>()) {
+                    if (Inst result = tryCompare(width, leftImm, tmpPromise(right)))
+                        return result;
+                }
+                
+                if (rightImm && rightImm.isRepresentableAs<int32_t>()) {
+                    if (Inst result = tryCompare(width, tmpPromise(left), rightImm))
+                        return result;
+                }
+
+                // Finally, handle comparison between tmps.
+                ArgPromise leftPromise = tmpPromise(left);
+                ArgPromise rightPromise = tmpPromise(right);
+                return compare(width, relCond, leftPromise, rightPromise);
+            }
+
+            // Floating point comparisons can't really do anything smart.
+            ArgPromise leftPromise = tmpPromise(left);
+            ArgPromise rightPromise = tmpPromise(right);
+            if (value->child(0)->type() == Float)
+                return compareFloat(doubleCond, leftPromise, rightPromise);
+            return compareDouble(doubleCond, leftPromise, rightPromise);
+        };
+
+        Arg::Width width = Arg::widthForB3Type(value->type());
+        Arg resCond = Arg::resCond(MacroAssembler::NonZero).inverted(inverted);
+        
+        auto tryTest = [&] (
+            Arg::Width width, ArgPromise&& left, ArgPromise&& right) -> Inst {
+            if (Inst result = test(width, resCond, left, right))
+                return result;
+            if (Inst result = test(width, resCond, right, left))
+                return result;
+            return Inst();
+        };
+
+        auto attemptFused = [&] () -> Inst {
+            switch (value->opcode()) {
+            case NotEqual:
+                return createRelCond(MacroAssembler::NotEqual, MacroAssembler::DoubleNotEqualOrUnordered);
+            case Equal:
+                return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqual);
+            case LessThan:
+                return createRelCond(MacroAssembler::LessThan, MacroAssembler::DoubleLessThan);
+            case GreaterThan:
+                return createRelCond(MacroAssembler::GreaterThan, MacroAssembler::DoubleGreaterThan);
+            case LessEqual:
+                return createRelCond(MacroAssembler::LessThanOrEqual, MacroAssembler::DoubleLessThanOrEqual);
+            case GreaterEqual:
+                return createRelCond(MacroAssembler::GreaterThanOrEqual, MacroAssembler::DoubleGreaterThanOrEqual);
+            case EqualOrUnordered:
+                // The integer condition is never used in this case.
+                return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqualOrUnordered);
+            case Above:
+                // We use a bogus double condition because these integer comparisons won't go down that
+                // path anyway.
+                return createRelCond(MacroAssembler::Above, MacroAssembler::DoubleEqual);
+            case Below:
+                return createRelCond(MacroAssembler::Below, MacroAssembler::DoubleEqual);
+            case AboveEqual:
+                return createRelCond(MacroAssembler::AboveOrEqual, MacroAssembler::DoubleEqual);
+            case BelowEqual:
+                return createRelCond(MacroAssembler::BelowOrEqual, MacroAssembler::DoubleEqual);
+            case BitAnd: {
+                Value* left = value->child(0);
+                Value* right = value->child(1);
+
+                bool hasRightConst;
+                int64_t rightConst;
+                Arg rightImm;
+                Arg rightImm64;
+
+                hasRightConst = right->hasInt();
+                if (hasRightConst) {
+                    rightConst = right->asInt();
+                    rightImm = bitImm(right);
+                    rightImm64 = bitImm64(right);
+                }
+                
+                auto tryTestLoadImm = [&] (Arg::Width width, Arg::Signedness signedness, B3::Opcode loadOpcode) -> Inst {
+                    if (!hasRightConst)
+                        return Inst();
+                    // Signed loads will create high bits, so if the immediate has high bits
+                    // then we cannot proceed. Consider BitAnd(Load8S(ptr), 0x101). This cannot
+                    // be turned into testb (ptr), $1, since if the high bit within that byte
+                    // was set then it would be extended to include 0x100. The handling below
+                    // won't anticipate this, so we need to catch it here.
+                    if (signedness == Arg::Signed
+                        && !Arg::isRepresentableAs(width, Arg::Unsigned, rightConst))
+                        return Inst();
+                    
+                    // FIXME: If this is unsigned then we can chop things off of the immediate.
+                    // This might make the immediate more legal. Perhaps that's a job for
+                    // strength reduction?
+                    
+                    if (rightImm) {
+                        if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm)) {
+                            commitInternal(left);
+                            return result;
+                        }
+                    }
+                    if (rightImm64) {
+                        if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm64)) {
+                            commitInternal(left);
+                            return result;
+                        }
+                    }
+                    return Inst();
+                };
+
+                if (canCommitInternal) {
+                    // First handle test's that involve fewer bits than B3's type system supports.
+
+                    if (Inst result = tryTestLoadImm(Arg::Width8, Arg::Unsigned, Load8Z))
+                        return result;
+                    
+                    if (Inst result = tryTestLoadImm(Arg::Width8, Arg::Signed, Load8S))
+                        return result;
+                    
+                    if (Inst result = tryTestLoadImm(Arg::Width16, Arg::Unsigned, Load16Z))
+                        return result;
+                    
+                    if (Inst result = tryTestLoadImm(Arg::Width16, Arg::Signed, Load16S))
+                        return result;
+
+                    // This allows us to use a 32-bit test for 64-bit BitAnd if the immediate is
+                    // representable as an unsigned 32-bit value. The logic involved is the same
+                    // as if we were pondering using a 32-bit test for
+                    // BitAnd(SExt(Load(ptr)), const), in the sense that in both cases we have
+                    // to worry about high bits. So, we use the "Signed" version of this helper.
+                    if (Inst result = tryTestLoadImm(Arg::Width32, Arg::Signed, Load))
+                        return result;
+                    
+                    // This is needed to handle 32-bit test for arbitrary 32-bit immediates.
+                    if (Inst result = tryTestLoadImm(width, Arg::Unsigned, Load))
+                        return result;
+                    
+                    // Now handle test's that involve a load.
+                    
+                    Arg::Width width = Arg::widthForB3Type(value->child(0)->type());
+                    if (Inst result = tryTest(width, loadPromise(left), tmpPromise(right))) {
+                        commitInternal(left);
+                        return result;
+                    }
+                    
+                    if (Inst result = tryTest(width, tmpPromise(left), loadPromise(right))) {
+                        commitInternal(right);
+                        return result;
+                    }
+                }
+
+                // Now handle test's that involve an immediate and a tmp.
+
+                if (hasRightConst) {
+                    if ((width == Arg::Width32 && rightConst == 0xffffffff)
+                        || (width == Arg::Width64 && rightConst == -1)) {
+                        if (Inst result = tryTest(width, tmpPromise(left), tmpPromise(left)))
+                            return result;
+                    }
+                    if (isRepresentableAs<uint32_t>(rightConst)) {
+                        if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm))
+                            return result;
+                        if (Inst result = tryTest(Arg::Width32, tmpPromise(left), rightImm64))
+                            return result;
+                    }
+                    if (Inst result = tryTest(width, tmpPromise(left), rightImm))
+                        return result;
+                    if (Inst result = tryTest(width, tmpPromise(left), rightImm64))
+                        return result;
+                }
+
+                // Finally, just do tmp's.
+                return tryTest(width, tmpPromise(left), tmpPromise(right));
+            }
+            default:
+                return Inst();
+            }
+        };
+
+        if (FusionResult fusionResult = prepareToFuse(value)) {
+            if (Inst result = attemptFused()) {
+                commitFusion(value, fusionResult);
+                return result;
+            }
+        }
+
+        if (Arg::isValidBitImmForm(-1)) {
+            if (canCommitInternal && value->as<MemoryValue>()) {
+                // Handle things like Branch(Load8Z(value))
+
+                if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8Z), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(Arg::Width8, loadPromise(value, Load8S), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16Z), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(Arg::Width16, loadPromise(value, Load16S), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+
+                if (Inst result = tryTest(width, loadPromise(value), Arg::bitImm(-1))) {
+                    commitInternal(value);
+                    return result;
+                }
+            }
+
+            ArgPromise leftPromise = tmpPromise(value);
+            ArgPromise rightPromise = Arg::bitImm(-1);
+            if (Inst result = test(width, resCond, leftPromise, rightPromise))
+                return result;
+        }
+        
+        // Sometimes this is the only form of test available. We prefer not to use this because
+        // it's less canonical.
+        ArgPromise leftPromise = tmpPromise(value);
+        ArgPromise rightPromise = tmpPromise(value);
+        return test(width, resCond, leftPromise, rightPromise);
+    }
+
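+    // Creates a fused compare-and-branch Inst for the given value by instantiating
+    // createGenericCompare with the Branch/BranchTest and BranchDouble/BranchFloat opcodes for
+    // each operand width.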
+    Inst createBranch(Value* value, bool inverted = false)
+    {
+        return createGenericCompare(
+            value,
+            [this] (
+                Arg::Width width, const Arg& relCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    if (isValidForm(Branch8, Arg::RelCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            Branch8, m_value, relCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(Branch32, Arg::RelCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            Branch32, m_value, relCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(Branch64, Arg::RelCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            Branch64, m_value, relCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (
+                Arg::Width width, const Arg& resCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    if (isValidForm(BranchTest8, Arg::ResCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            BranchTest8, m_value, resCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(BranchTest32, Arg::ResCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            BranchTest32, m_value, resCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(BranchTest64, Arg::ResCond, left.kind(), right.kind())) {
+                        return left.inst(right.inst(
+                            BranchTest64, m_value, resCond,
+                            left.consume(*this), right.consume(*this)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(BranchDouble, Arg::DoubleCond, left.kind(), right.kind())) {
+                    return left.inst(right.inst(
+                        BranchDouble, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this)));
+                }
+                return Inst();
+            },
+            [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(BranchFloat, Arg::DoubleCond, left.kind(), right.kind())) {
+                    return left.inst(right.inst(
+                        BranchFloat, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this)));
+                }
+                return Inst();
+            },
+            inverted);
+    }
+
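+    // Creates an Inst that materializes the boolean result of the given comparison into
+    // tmp(m_value), using the Compare/Test opcode families rather than branches.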
+    Inst createCompare(Value* value, bool inverted = false)
+    {
+        return createGenericCompare(
+            value,
+            [this] (
+                Arg::Width width, const Arg& relCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(Compare32, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Compare32, m_value, relCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(Compare64, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Compare64, m_value, relCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (
+                Arg::Width width, const Arg& resCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    if (isValidForm(Test32, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Test32, m_value, resCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                case Arg::Width64:
+                    if (isValidForm(Test64, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) {
+                        return left.inst(right.inst(
+                            Test64, m_value, resCond,
+                            left.consume(*this), right.consume(*this), tmp(m_value)));
+                    }
+                    return Inst();
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(CompareDouble, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
+                    return left.inst(right.inst(
+                        CompareDouble, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this), tmp(m_value)));
+                }
+                return Inst();
+            },
+            [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                if (isValidForm(CompareFloat, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) {
+                    return left.inst(right.inst(
+                        CompareFloat, m_value, doubleCond,
+                        left.consume(*this), right.consume(*this), tmp(m_value)));
+                }
+                return Inst();
+            },
+            inverted);
+    }
+
+    struct MoveConditionallyConfig {
+        Air::Opcode moveConditionally32;
+        Air::Opcode moveConditionally64;
+        Air::Opcode moveConditionallyTest32;
+        Air::Opcode moveConditionallyTest64;
+        Air::Opcode moveConditionallyDouble;
+        Air::Opcode moveConditionallyFloat;
+    };
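+    // Lowers a Select by fusing the comparison in child(0) with a conditional move. The
+    // three-operand form consumes both cases directly; otherwise we first move the else-case into
+    // the result and conditionally overwrite it with the then-case.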
+    Inst createSelect(const MoveConditionallyConfig& config)
+    {
+        auto createSelectInstruction = [&] (Air::Opcode opcode, const Arg& condition, ArgPromise& left, ArgPromise& right) -> Inst {
+            if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                Tmp result = tmp(m_value);
+                Tmp thenCase = tmp(m_value->child(1));
+                Tmp elseCase = tmp(m_value->child(2));
+                return left.inst(right.inst(
+                    opcode, m_value, condition,
+                    left.consume(*this), right.consume(*this), thenCase, elseCase, result));
+            }
+            if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp)) {
+                Tmp result = tmp(m_value);
+                Tmp source = tmp(m_value->child(1));
+                append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result);
+                return left.inst(right.inst(
+                    opcode, m_value, condition,
+                    left.consume(*this), right.consume(*this), source, result));
+            }
+            return Inst();
+        };
+
+        return createGenericCompare(
+            m_value->child(0),
+            [&] (
+                Arg::Width width, const Arg& relCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    // FIXME: Support these things.
+                    // https://bugs.webkit.org/show_bug.cgi?id=151504
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    return createSelectInstruction(config.moveConditionally32, relCond, left, right);
+                case Arg::Width64:
+                    return createSelectInstruction(config.moveConditionally64, relCond, left, right);
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [&] (
+                Arg::Width width, const Arg& resCond,
+                ArgPromise& left, ArgPromise& right) -> Inst {
+                switch (width) {
+                case Arg::Width8:
+                    // FIXME: Support more things.
+                    // https://bugs.webkit.org/show_bug.cgi?id=151504
+                    return Inst();
+                case Arg::Width16:
+                    return Inst();
+                case Arg::Width32:
+                    return createSelectInstruction(config.moveConditionallyTest32, resCond, left, right);
+                case Arg::Width64:
+                    return createSelectInstruction(config.moveConditionallyTest64, resCond, left, right);
+                }
+                ASSERT_NOT_REACHED();
+            },
+            [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                return createSelectInstruction(config.moveConditionallyDouble, doubleCond, left, right);
+            },
+            [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst {
+                return createSelectInstruction(config.moveConditionallyFloat, doubleCond, left, right);
+            },
+            false);
+    }
+    
+    bool tryAppendLea()
+    {
+        Air::Opcode leaOpcode = tryOpcodeForType(Lea32, Lea64, m_value->type());
+        if (!isValidForm(leaOpcode, Arg::Index, Arg::Tmp))
+            return false;
+        
+        // This lets us turn things like this:
+        //
+        //     Add(Add(@x, Shl(@y, $2)), $100)
+        //
+        // Into this:
+        //
+        //     lea 100(%rdi,%rsi,4), %rax
+        //
+        // We have a choice here between committing the internal bits of an index or sharing
+        // them. There are solid arguments for both.
+        //
+        // Sharing: The word on the street is that the cost of a lea is one cycle no matter
+        // what it does. Every experiment I've ever seen seems to confirm this. So, sharing
+        // helps us in situations where Wasm input did this:
+        //
+        //     x = a[i].x;
+        //     y = a[i].y;
+        //
+        // With sharing we would do:
+        //
+        //     leal (%a,%i,4), %tmp
+        //     cmp (%size, %tmp)
+        //     ja _fail
+        //     movl (%base, %tmp), %x
+        //     leal 4(%a,%i,4), %tmp
+        //     cmp (%size, %tmp)
+        //     ja _fail
+        //     movl (%base, %tmp), %y
+        //
+        // In the absence of sharing, we may find ourselves needing separate registers for
+        // the innards of the index. That's relatively unlikely to be a thing due to other
+        // optimizations that we already have, but it could happen
+        //
+        // Committing: The worst case is that there is a complicated graph of additions and
+        // shifts, where each value has multiple uses. In that case, it's better to compute
+        // each one separately from the others since that way, each calculation will use a
+        // relatively nearby tmp as its input. That seems uncommon, but in those cases,
+        // committing is a clear winner: it would result in a simple interference graph
+        // while sharing would result in a complex one. Interference sucks because it means
+        // more time in IRC and it means worse code.
+        //
+        // It's not super clear if any of these corner cases would ever arise. Committing
+        // has the benefit that it's easier to reason about, and protects against a much darker
+        // corner case (more interference).
+                
+        // Here are the things we want to match:
+        // Add(Add(@x, @y), $c)
+        // Add(Shl(@x, $c), @y)
+        // Add(@x, Shl(@y, $c))
+        // Add(Add(@x, Shl(@y, $c)), $d)
+        // Add(Add(Shl(@x, $c), @y), $d)
+        //
+        // Note that if you do Add(Shl(@x, $c), $d) then we will treat $d as a non-constant and
+        // force it to materialize. You'll get something like this:
+        //
+        // movl $d, %tmp
+        // leal (%tmp,%x,1<<c), %result
+
+        Value* innerAdd = nullptr;
+
+        Value* value = m_value;
+
+        int32_t offset = 0;
+        if (value->child(1)->isRepresentableAs<int32_t>()
+            && canBeInternal(value->child(0))
+            && value->child(0)->opcode() == Add) {
+            innerAdd = value->child(0);
+            offset = static_cast<int32_t>(value->child(1)->asInt());
+            value = value->child(0);
+        }
+        
+        auto tryShl = [&] (Value* shl, Value* other) -> bool {
+            std::optional scale = scaleForShl(shl, offset);
+            if (!scale)
+                return false;
+            if (!canBeInternal(shl))
+                return false;
+            
+            ASSERT(!m_locked.contains(shl->child(0)));
+            ASSERT(!m_locked.contains(other));
+            
+            append(leaOpcode, Arg::index(tmp(other), tmp(shl->child(0)), *scale, offset), tmp(m_value));
+            commitInternal(innerAdd);
+            commitInternal(shl);
+            return true;
+        };
+        
+        if (tryShl(value->child(0), value->child(1)))
+            return true;
+        if (tryShl(value->child(1), value->child(0)))
+            return true;
+        
+        // The remaining pattern is just:
+        // Add(@x, @y) (only if offset != 0)
+        if (!offset)
+            return false;
+        ASSERT(!m_locked.contains(value->child(0)));
+        ASSERT(!m_locked.contains(value->child(1)));
+        append(leaOpcode, Arg::index(tmp(value->child(0)), tmp(value->child(1)), 1, offset), tmp(m_value));
+        commitInternal(innerAdd);
+        return true;
+    }
+
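+    // Lowers m_value to Air by dispatching on its B3 opcode. Each case either matches a fusable
+    // pattern (lea, multiply-add/sub/neg, store-ops, compare/branch fusion) or falls back to the
+    // generic appendUnOp/appendBinOp/appendStore helpers.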
+    void lower()
+    {
+        switch (m_value->opcode()) {
+        case B3::Nop: {
+            // Yes, we will totally see Nop's because some phases will replaceWithNop() instead of
+            // properly removing things.
+            return;
+        }
+            
+        case Load: {
+            append(trappingInst(m_value, moveForType(m_value->type()), m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+            
+        case Load8S: {
+            append(trappingInst(m_value, Load8SignedExtendTo32, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Load8Z: {
+            append(trappingInst(m_value, Load8, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Load16S: {
+            append(trappingInst(m_value, Load16SignedExtendTo32, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Load16Z: {
+            append(trappingInst(m_value, Load16, m_value, addr(m_value), tmp(m_value)));
+            return;
+        }
+
+        case Add: {
+            if (tryAppendLea())
+                return;
+            
+            Air::Opcode multiplyAddOpcode = tryOpcodeForType(MultiplyAdd32, MultiplyAdd64, m_value->type());
+            if (isValidForm(multiplyAddOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                Value* left = m_value->child(0);
+                Value* right = m_value->child(1);
+                if (!imm(right) || m_valueToTmp[right]) {
+                    auto tryAppendMultiplyAdd = [&] (Value* left, Value* right) -> bool {
+                        if (left->opcode() != Mul || !canBeInternal(left))
+                            return false;
+
+                        Value* multiplyLeft = left->child(0);
+                        Value* multiplyRight = left->child(1);
+                        if (canBeInternal(multiplyLeft) || canBeInternal(multiplyRight))
+                            return false;
+
+                        append(multiplyAddOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(right), tmp(m_value));
+                        commitInternal(left);
+
+                        return true;
+                    };
+
+                    if (tryAppendMultiplyAdd(left, right))
+                        return;
+                    if (tryAppendMultiplyAdd(right, left))
+                        return;
+                }
+            }
+
+            appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Sub: {
+            Air::Opcode multiplySubOpcode = tryOpcodeForType(MultiplySub32, MultiplySub64, m_value->type());
+            if (multiplySubOpcode != Air::Oops
+                && isValidForm(multiplySubOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                Value* left = m_value->child(0);
+                Value* right = m_value->child(1);
+                if (!imm(right) || m_valueToTmp[right]) {
+                    auto tryAppendMultiplySub = [&] () -> bool {
+                        if (right->opcode() != Mul || !canBeInternal(right))
+                            return false;
+
+                        Value* multiplyLeft = right->child(0);
+                        Value* multiplyRight = right->child(1);
+                        if (m_locked.contains(multiplyLeft) || m_locked.contains(multiplyRight))
+                            return false;
+
+                        append(multiplySubOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(left), tmp(m_value));
+                        commitInternal(right);
+
+                        return true;
+                    };
+
+                    if (tryAppendMultiplySub())
+                        return;
+                }
+            }
+
+            appendBinOp<Sub32, Sub64, SubDouble, SubFloat>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Neg: {
+            Air::Opcode multiplyNegOpcode = tryOpcodeForType(MultiplyNeg32, MultiplyNeg64, m_value->type());
+            if (multiplyNegOpcode != Air::Oops
+                && isValidForm(multiplyNegOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)
+                && m_value->child(0)->opcode() == Mul
+                && canBeInternal(m_value->child(0))) {
+                Value* multiplyOperation = m_value->child(0);
+                Value* multiplyLeft = multiplyOperation->child(0);
+                Value* multiplyRight = multiplyOperation->child(1);
+                if (!m_locked.contains(multiplyLeft) && !m_locked.contains(multiplyRight)) {
+                    append(multiplyNegOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(m_value));
+                    commitInternal(multiplyOperation);
+                    return;
+                }
+            }
+
+            appendUnOp<Neg32, Neg64, NegateDouble, NegateFloat>(m_value->child(0));
+            return;
+        }
+
+        case Mul: {
+            appendBinOp<Mul32, Mul64, MulDouble, MulFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Div: {
+            if (m_value->isChill())
+                RELEASE_ASSERT(isARM64());
+            if (isInt(m_value->type()) && isX86()) {
+                lowerX86Div(Div);
+                return;
+            }
+            ASSERT(!isX86() || isFloat(m_value->type()));
+
+            appendBinOp<Div32, Div64, DivDouble, DivFloat>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case UDiv: {
+            if (isInt(m_value->type()) && isX86()) {
+                lowerX86UDiv(UDiv);
+                return;
+            }
+
+            ASSERT(!isX86() && !isFloat(m_value->type()));
+
+            appendBinOp<UDiv32, UDiv64, Air::Oops, Air::Oops>(m_value->child(0), m_value->child(1));
+            return;
+
+        }
+
+        case Mod: {
+            RELEASE_ASSERT(isX86());
+            RELEASE_ASSERT(!m_value->isChill());
+            lowerX86Div(Mod);
+            return;
+        }
+
+        case UMod: {
+            RELEASE_ASSERT(isX86());
+            lowerX86UDiv(UMod);
+            return;
+        }
+
+        case BitAnd: {
+            if (m_value->child(1)->isInt(0xff)) {
+                appendUnOp<ZeroExtend8To32, ZeroExtend8To32>(m_value->child(0));
+                return;
+            }
+            
+            if (m_value->child(1)->isInt(0xffff)) {
+                appendUnOp<ZeroExtend16To32, ZeroExtend16To32>(m_value->child(0));
+                return;
+            }
+
+            if (m_value->child(1)->isInt(0xffffffff)) {
+                appendUnOp<Move32, Move32>(m_value->child(0));
+                return;
+            }
+            
+            appendBinOp<And32, And64, AndDouble, AndFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case BitOr: {
+            appendBinOp<Or32, Or64, OrDouble, OrFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case BitXor: {
+            // FIXME: If canBeInternal(child), we should generate this using the comparison path.
+            // https://bugs.webkit.org/show_bug.cgi?id=152367
+            
+            if (m_value->child(1)->isInt(-1)) {
+                appendUnOp<Not32, Not64>(m_value->child(0));
+                return;
+            }
+            appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>(
+                m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Shl: {
+            if (m_value->child(1)->isInt32(1)) {
+                appendBinOp<Add32, Add64>(m_value->child(0), m_value->child(0));
+                return;
+            }
+            
+            appendShift<Lshift32, Lshift64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case SShr: {
+            appendShift<Rshift32, Rshift64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case ZShr: {
+            appendShift<Urshift32, Urshift64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case RotR: {
+            appendShift<RotateRight32, RotateRight64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case RotL: {
+            appendShift<RotateLeft32, RotateLeft64>(m_value->child(0), m_value->child(1));
+            return;
+        }
+
+        case Clz: {
+            appendUnOp<CountLeadingZeros32, CountLeadingZeros64>(m_value->child(0));
+            return;
+        }
+
+        case Abs: {
+            RELEASE_ASSERT_WITH_MESSAGE(!isX86(), "Abs is not supported natively on x86. It must be replaced before generation.");
+            appendUnOp<Air::Oops, Air::Oops, AbsDouble, AbsFloat>(m_value->child(0));
+            return;
+        }
+
+        case Ceil: {
+            appendUnOp<Air::Oops, Air::Oops, CeilDouble, CeilFloat>(m_value->child(0));
+            return;
+        }
+
+        case Floor: {
+            appendUnOp<Air::Oops, Air::Oops, FloorDouble, FloorFloat>(m_value->child(0));
+            return;
+        }
+
+        case Sqrt: {
+            appendUnOp<Air::Oops, Air::Oops, SqrtDouble, SqrtFloat>(m_value->child(0));
+            return;
+        }
+
+        case BitwiseCast: {
+            appendUnOp<Move32ToFloat, Move64ToDouble, MoveDoubleTo64, MoveFloatTo32>(m_value->child(0));
+            return;
+        }
+
+        case Store: {
+            Value* valueToStore = m_value->child(0);
+            if (canBeInternal(valueToStore)) {
+                bool matched = false;
+                switch (valueToStore->opcode()) {
+                case Add:
+                    matched = tryAppendStoreBinOp<Add32, Add64, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                case Sub:
+                    if (valueToStore->child(0)->isInt(0)) {
+                        matched = tryAppendStoreUnOp<Neg32, Neg64>(valueToStore->child(1));
+                        break;
+                    }
+                    matched = tryAppendStoreBinOp<Sub32, Sub64>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                case BitAnd:
+                    matched = tryAppendStoreBinOp<And32, And64, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                case BitXor:
+                    if (valueToStore->child(1)->isInt(-1)) {
+                        matched = tryAppendStoreUnOp<Not32, Not64>(valueToStore->child(0));
+                        break;
+                    }
+                    matched = tryAppendStoreBinOp<Xor32, Xor64, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                default:
+                    break;
+                }
+                if (matched) {
+                    commitInternal(valueToStore);
+                    return;
+                }
+            }
+
+            appendStore(valueToStore, addr(m_value));
+            return;
+        }
+
+        case B3::Store8: {
+            Value* valueToStore = m_value->child(0);
+            if (canBeInternal(valueToStore)) {
+                bool matched = false;
+                switch (valueToStore->opcode()) {
+                case Add:
+                    matched = tryAppendStoreBinOp<Add8, Air::Oops, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                default:
+                    break;
+                }
+                if (matched) {
+                    commitInternal(valueToStore);
+                    return;
+                }
+            }
+            appendStore(Air::Store8, valueToStore, addr(m_value));
+            return;
+        }
+
+        case B3::Store16: {
+            Value* valueToStore = m_value->child(0);
+            if (canBeInternal(valueToStore)) {
+                bool matched = false;
+                switch (valueToStore->opcode()) {
+                case Add:
+                    matched = tryAppendStoreBinOp<Add16, Air::Oops, Commutative>(
+                        valueToStore->child(0), valueToStore->child(1));
+                    break;
+                default:
+                    break;
+                }
+                if (matched) {
+                    commitInternal(valueToStore);
+                    return;
+                }
+            }
+            appendStore(Air::Store16, valueToStore, addr(m_value));
+            return;
+        }
+
+        case WasmAddress: {
+            WasmAddressValue* address = m_value->as<WasmAddressValue>();
+
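+            // The pinned register holds the wasm memory base; add it in to form the effective address.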
+            append(Add64, Arg(address->pinnedGPR()), tmp(address));
+            return;
+        }
+
+        case Fence: {
+            FenceValue* fence = m_value->as<FenceValue>();
+            if (!fence->write && !fence->read)
+                return;
+            if (!fence->write) {
+                // A fence that reads but does not write is for protecting motion of stores.
+                append(StoreFence);
+                return;
+            }
+            if (!fence->read) {
+                // A fence that writes but does not read is for protecting motion of loads.
+                append(LoadFence);
+                return;
+            }
+            append(MemoryFence);
+            return;
+        }
+
+        case Trunc: {
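+            // Truncation is free here: the result shares its child's Tmp (the low 32 bits are
+            // already in place), so there is nothing to emit.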
+            ASSERT(tmp(m_value->child(0)) == tmp(m_value));
+            return;
+        }
+
+        case SExt8: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case SExt16: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case ZExt32: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case SExt32: {
+            // FIXME: We should have support for movsbq/movswq
+            // https://bugs.webkit.org/show_bug.cgi?id=152232
+            
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case FloatToDouble: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case DoubleToFloat: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case ArgumentReg: {
+            m_prologue.append(Inst(
+                moveForType(m_value->type()), m_value,
+                Tmp(m_value->as<ArgumentRegValue>()->argumentReg()),
+                tmp(m_value)));
+            return;
+        }
+
+        case Const32:
+        case Const64: {
+            if (imm(m_value))
+                append(Move, imm(m_value), tmp(m_value));
+            else
+                append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value));
+            return;
+        }
+
+        case ConstDouble:
+        case ConstFloat: {
+            // We expect that the moveConstants() phase has run, and any doubles referenced from
+            // stackmaps get fused.
+            RELEASE_ASSERT(m_value->opcode() == ConstFloat || isIdentical(m_value->asDouble(), 0.0));
+            RELEASE_ASSERT(m_value->opcode() == ConstDouble || isIdentical(m_value->asFloat(), 0.0f));
+            append(MoveZeroToDouble, tmp(m_value));
+            return;
+        }
+
+        case FramePointer: {
+            ASSERT(tmp(m_value) == Tmp(GPRInfo::callFrameRegister));
+            return;
+        }
+
+        case SlotBase: {
+            append(
+                pointerType() == Int64 ? Lea64 : Lea32,
+                Arg::stack(m_stackToStack.get(m_value->as<SlotBaseValue>()->slot())),
+                tmp(m_value));
+            return;
+        }
+
+        case Equal:
+        case NotEqual:
+        case LessThan:
+        case GreaterThan:
+        case LessEqual:
+        case GreaterEqual:
+        case Above:
+        case Below:
+        case AboveEqual:
+        case BelowEqual:
+        case EqualOrUnordered: {
+            m_insts.last().append(createCompare(m_value));
+            return;
+        }
+
+        case Select: {
+            MoveConditionallyConfig config;
+            if (isInt(m_value->type())) {
+                config.moveConditionally32 = MoveConditionally32;
+                config.moveConditionally64 = MoveConditionally64;
+                config.moveConditionallyTest32 = MoveConditionallyTest32;
+                config.moveConditionallyTest64 = MoveConditionallyTest64;
+                config.moveConditionallyDouble = MoveConditionallyDouble;
+                config.moveConditionallyFloat = MoveConditionallyFloat;
+            } else {
+                // FIXME: it's not obvious that these are particularly efficient.
+                config.moveConditionally32 = MoveDoubleConditionally32;
+                config.moveConditionally64 = MoveDoubleConditionally64;
+                config.moveConditionallyTest32 = MoveDoubleConditionallyTest32;
+                config.moveConditionallyTest64 = MoveDoubleConditionallyTest64;
+                config.moveConditionallyDouble = MoveDoubleConditionallyDouble;
+                config.moveConditionallyFloat = MoveDoubleConditionallyFloat;
+            }
+            
+            m_insts.last().append(createSelect(config));
+            return;
+        }
+
+        case IToD: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case IToF: {
+            appendUnOp(m_value->child(0));
+            return;
+        }
+
+        case B3::CCall: {
+            CCallValue* cCall = m_value->as<CCallValue>();
+
+            Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall);
+
+            // We have a ton of flexibility regarding the callee argument, but currently, we don't
+            // use it yet. It gets weird for reasons:
+            // 1) We probably will never take advantage of this. We don't have C calls to locations
+            //    loaded from addresses. We have JS calls like that, but those use Patchpoints.
+            // 2) On X86_64 we still don't support call with BaseIndex.
+            // 3) On non-X86, we don't natively support any kind of loading from address.
+            // 4) We don't have an isValidForm() for the CCallSpecial so we have no smart way to
+            //    decide.
+            // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052
+            inst.args.append(tmp(cCall->child(0)));
+
+            if (cCall->type() != Void)
+                inst.args.append(tmp(cCall));
+
+            for (unsigned i = 1; i < cCall->numChildren(); ++i)
+                inst.args.append(immOrTmp(cCall->child(i)));
+
+            m_insts.last().append(WTFMove(inst));
+            return;
+        }
+
+        case Patchpoint: {
+            PatchpointValue* patchpointValue = m_value->as<PatchpointValue>();
+            ensureSpecial(m_patchpointSpecial);
+            
+            Inst inst(Patch, patchpointValue, Arg::special(m_patchpointSpecial));
+
+            Vector<Inst> after;
+            if (patchpointValue->type() != Void) {
+                switch (patchpointValue->resultConstraint.kind()) {
+                case ValueRep::WarmAny:
+                case ValueRep::ColdAny:
+                case ValueRep::LateColdAny:
+                case ValueRep::SomeRegister:
+                case ValueRep::SomeEarlyRegister:
+                    inst.args.append(tmp(patchpointValue));
+                    break;
+                case ValueRep::Register: {
+                    Tmp reg = Tmp(patchpointValue->resultConstraint.reg());
+                    inst.args.append(reg);
+                    after.append(Inst(
+                        relaxedMoveForType(patchpointValue->type()), m_value, reg, tmp(patchpointValue)));
+                    break;
+                }
+                case ValueRep::StackArgument: {
+                    Arg arg = Arg::callArg(patchpointValue->resultConstraint.offsetFromSP());
+                    inst.args.append(arg);
+                    after.append(Inst(
+                        moveForType(patchpointValue->type()), m_value, arg, tmp(patchpointValue)));
+                    break;
+                }
+                default:
+                    RELEASE_ASSERT_NOT_REACHED();
+                    break;
+                }
+            }
+            
+            fillStackmap(inst, patchpointValue, 0);
+            
+            if (patchpointValue->resultConstraint.isReg())
+                patchpointValue->lateClobbered().clear(patchpointValue->resultConstraint.reg());
+
+            for (unsigned i = patchpointValue->numGPScratchRegisters; i--;)
+                inst.args.append(m_code.newTmp(Arg::GP));
+            for (unsigned i = patchpointValue->numFPScratchRegisters; i--;)
+                inst.args.append(m_code.newTmp(Arg::FP));
+            
+            m_insts.last().append(WTFMove(inst));
+            m_insts.last().appendVector(after);
+            return;
+        }
+
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul: {
+            CheckValue* checkValue = m_value->as<CheckValue>();
+
+            Value* left = checkValue->child(0);
+            Value* right = checkValue->child(1);
+
+            Tmp result = tmp(m_value);
+
+            // Handle checked negation.
+            if (checkValue->opcode() == CheckSub && left->isInt(0)) {
+                append(Move, tmp(right), result);
+
+                Air::Opcode opcode =
+                    opcodeForType(BranchNeg32, BranchNeg64, checkValue->type());
+                CheckSpecial* special = ensureCheckSpecial(opcode, 2);
+
+                Inst inst(Patch, checkValue, Arg::special(special));
+                inst.args.append(Arg::resCond(MacroAssembler::Overflow));
+                inst.args.append(result);
+
+                fillStackmap(inst, checkValue, 2);
+
+                m_insts.last().append(WTFMove(inst));
+                return;
+            }
+
+            Air::Opcode opcode = Air::Oops;
+            Commutativity commutativity = NotCommutative;
+            StackmapSpecial::RoleMode stackmapRole = StackmapSpecial::SameAsRep;
+            switch (m_value->opcode()) {
+            case CheckAdd:
+                opcode = opcodeForType(BranchAdd32, BranchAdd64, m_value->type());
+                stackmapRole = StackmapSpecial::ForceLateUseUnlessRecoverable;
+                commutativity = Commutative;
+                break;
+            case CheckSub:
+                opcode = opcodeForType(BranchSub32, BranchSub64, m_value->type());
+                break;
+            case CheckMul:
+                opcode = opcodeForType(BranchMul32, BranchMul64, checkValue->type());
+                stackmapRole = StackmapSpecial::ForceLateUse;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+
+            // FIXME: It would be great to fuse Loads into these. We currently don't do it because the
+            // rule for stackmaps is that all addresses are just stack addresses. Maybe we could relax
+            // this rule here.
+            // https://bugs.webkit.org/show_bug.cgi?id=151228
+
+            Vector<Arg> sources;
+            if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
+                sources.append(tmp(left));
+                sources.append(imm(right));
+            } else if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Imm, Arg::Tmp)) {
+                sources.append(imm(right));
+                append(Move, tmp(left), result);
+            } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                sources.append(tmp(left));
+                sources.append(tmp(right));
+            }  else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp)) {
+                if (commutativity == Commutative && preferRightForResult(left, right)) {
+                    sources.append(tmp(left));
+                    append(Move, tmp(right), result);
+                } else {
+                    sources.append(tmp(right));
+                    append(Move, tmp(left), result);
+                }
+            } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) {
+                sources.append(tmp(left));
+                sources.append(tmp(right));
+                sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
+                sources.append(m_code.newTmp(Arg::typeForB3Type(m_value->type())));
+            }
+
+            // There is a really hilarious case that arises when we do BranchAdd32(%x, %x). We won't emit
+            // such code, but the coalescing in our register allocator also does copy propagation, so
+            // although we emit:
+            //
+            //     Move %tmp1, %tmp2
+            //     BranchAdd32 %tmp1, %tmp2
+            //
+            // The register allocator may turn this into:
+            //
+            //     BranchAdd32 %rax, %rax
+            //
+            // Currently we handle this by ensuring that even this kind of addition can be undone. We can
+            // undo it by using the carry flag. It's tempting to get rid of that code and just "fix" this
+            // here by forcing LateUse on the stackmap. If we did that unconditionally, we'd lose a lot of
+            // performance. So it's tempting to do it only if left == right. But that creates an awkward
+            // constraint on Air: it means that Air would not be allowed to do any copy propagation.
+            // Notice that the %rax,%rax situation happened after Air copy-propagated the Move we are
+            // emitting. We know that copy-propagating over that Move causes add-to-self. But what if we
+            // emit something like a Move - or even do other kinds of copy-propagation on tmp's -
+            // somewhere else in this code. The add-to-self situation may only emerge after some other Air
+            // optimizations remove other Move's or identity-like operations. That's why we don't use
+            // LateUse here to take care of add-to-self.
+            
+            CheckSpecial* special = ensureCheckSpecial(opcode, 2 + sources.size(), stackmapRole);
+            
+            Inst inst(Patch, checkValue, Arg::special(special));
+
+            inst.args.append(Arg::resCond(MacroAssembler::Overflow));
+
+            inst.args.appendVector(sources);
+            inst.args.append(result);
+
+            fillStackmap(inst, checkValue, 2);
+
+            m_insts.last().append(WTFMove(inst));
+            return;
+        }
+
+        case Check: {
+            Inst branch = createBranch(m_value->child(0));
+
+            CheckSpecial* special = ensureCheckSpecial(branch);
+            
+            CheckValue* checkValue = m_value->as<CheckValue>();
+            
+            Inst inst(Patch, checkValue, Arg::special(special));
+            inst.args.appendVector(branch.args);
+            
+            fillStackmap(inst, checkValue, 1);
+            
+            m_insts.last().append(WTFMove(inst));
+            return;
+        }
+
+        case B3::WasmBoundsCheck: {
+            WasmBoundsCheckValue* value = m_value->as<WasmBoundsCheckValue>();
+
+            Value* ptr = value->child(0);
+
+            Arg temp = m_code.newTmp(Arg::GP);
+            append(Inst(Move32, value, tmp(ptr), temp));
+            if (value->offset()) {
+                if (imm(value->offset()))
+                    append(Add64, imm(value->offset()), temp);
+                else {
+                    Arg bigImm = m_code.newTmp(Arg::GP);
+                    append(Move, Arg::bigImm(value->offset()), bigImm);
+                    append(Add64, bigImm, temp);
+                }
+            }
+            append(Inst(Air::WasmBoundsCheck, value, temp, Arg(value->pinnedGPR())));
+            return;
+        }
+
+        case Upsilon: {
+            Value* value = m_value->child(0);
+            append(
+                relaxedMoveForType(value->type()), immOrTmp(value),
+                m_phiToTmp[m_value->as<UpsilonValue>()->phi()]);
+            return;
+        }
+
+        case Phi: {
+            // Snapshot the value of the Phi. It may change under us because you could do:
+            // a = Phi()
+            // Upsilon(@x, ^a)
+            // @a => this should get the value of the Phi before the Upsilon, i.e. not @x.
+
+            append(relaxedMoveForType(m_value->type()), m_phiToTmp[m_value], tmp(m_value));
+            return;
+        }
+
+        case Set: {
+            Value* value = m_value->child(0);
+            append(
+                relaxedMoveForType(value->type()), immOrTmp(value),
+                m_variableToTmp.get(m_value->as<VariableValue>()->variable()));
+            return;
+        }
+
+        case Get: {
+            append(
+                relaxedMoveForType(m_value->type()),
+                m_variableToTmp.get(m_value->as<VariableValue>()->variable()), tmp(m_value));
+            return;
+        }
+
+        case Branch: {
+            m_insts.last().append(createBranch(m_value->child(0)));
+            return;
+        }
+
+        case B3::Jump: {
+            append(Air::Jump);
+            return;
+        }
+            
+        case Identity: {
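+            // An Identity shares its child's Tmp, so there is nothing to emit.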
+            ASSERT(tmp(m_value->child(0)) == tmp(m_value));
+            return;
+        }
+
+        case Return: {
+            if (!m_value->numChildren()) {
+                append(RetVoid);
+                return;
+            }
+            Value* value = m_value->child(0);
+            Tmp returnValueGPR = Tmp(GPRInfo::returnValueGPR);
+            Tmp returnValueFPR = Tmp(FPRInfo::returnValueFPR);
+            switch (value->type()) {
+            case Void:
+                // It's impossible for a void value to be used as a child. If we did want to have a
+                // void return, we'd introduce a different opcode, like ReturnVoid.
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            case Int32:
+                append(Move, immOrTmp(value), returnValueGPR);
+                append(Ret32, returnValueGPR);
+                break;
+            case Int64:
+                append(Move, immOrTmp(value), returnValueGPR);
+                append(Ret64, returnValueGPR);
+                break;
+            case Float:
+                append(MoveFloat, tmp(value), returnValueFPR);
+                append(RetFloat, returnValueFPR);
+                break;
+            case Double:
+                append(MoveDouble, tmp(value), returnValueFPR);
+                append(RetDouble, returnValueFPR);
+                break;
+            }
+            return;
+        }
+
+        case B3::Oops: {
+            append(Air::Oops);
+            return;
+        }
+            
+        case B3::EntrySwitch: {
+            append(Air::EntrySwitch);
+            return;
+        }
+
+        default:
+            break;
+        }
+
+        dataLog("FATAL: could not lower ", deepDump(m_procedure, m_value), "\n");
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    void lowerX86Div(B3::Opcode op)
+    {
+#if CPU(X86) || CPU(X86_64)
+        Tmp eax = Tmp(X86Registers::eax);
+        Tmp edx = Tmp(X86Registers::edx);
+
+        Air::Opcode convertToDoubleWord;
+        Air::Opcode div;
+        switch (m_value->type()) {
+        case Int32:
+            convertToDoubleWord = X86ConvertToDoubleWord32;
+            div = X86Div32;
+            break;
+        case Int64:
+            convertToDoubleWord = X86ConvertToQuadWord64;
+            div = X86Div64;
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return;
+        }
+
+        ASSERT(op == Div || op == Mod);
+        X86Registers::RegisterID result = op == Div ? X86Registers::eax : X86Registers::edx;
+
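+        // Signed x86 division takes the dividend in edx:eax (edx filled by sign extension) and
+        // leaves the quotient in eax and the remainder in edx.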
+        append(Move, tmp(m_value->child(0)), eax);
+        append(convertToDoubleWord, eax, edx);
+        append(div, eax, edx, tmp(m_value->child(1)));
+        append(Move, Tmp(result), tmp(m_value));
+
+#else
+        UNUSED_PARAM(op);
+        UNREACHABLE_FOR_PLATFORM();
+#endif
+    }
+
+    void lowerX86UDiv(B3::Opcode op)
+    {
+#if CPU(X86) || CPU(X86_64)
+        Tmp eax = Tmp(X86Registers::eax);
+        Tmp edx = Tmp(X86Registers::edx);
+
+        Air::Opcode div = m_value->type() == Int32 ? X86UDiv32 : X86UDiv64;
+
+        ASSERT(op == UDiv || op == UMod);
+        X86Registers::RegisterID result = op == UDiv ? X86Registers::eax : X86Registers::edx;
+
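+        // Unsigned x86 division zeroes edx instead of sign-extending; the quotient ends up in eax
+        // and the remainder in edx.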
+        append(Move, tmp(m_value->child(0)), eax);
+        append(Xor64, edx, edx);
+        append(div, eax, edx, tmp(m_value->child(1)));
+        append(Move, Tmp(result), tmp(m_value));
+#else
+        UNUSED_PARAM(op);
+        UNREACHABLE_FOR_PLATFORM();
+#endif
+    }
+
+    IndexSet<Value> m_locked; // These are values that will have no Tmp in Air.
+    IndexMap<Value, Tmp> m_valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned".
+    IndexMap<Value, Tmp> m_phiToTmp; // Each Phi gets its own Tmp.
+    IndexMap<B3::BasicBlock, Air::BasicBlock*> m_blockToBlock;
+    HashMap<B3::StackSlot*, Air::StackSlot*> m_stackToStack;
+    HashMap<Variable*, Tmp> m_variableToTmp;
+
+    UseCounts m_useCounts;
+    PhiChildren m_phiChildren;
+    BlockWorklist m_fastWorklist;
+    Dominators& m_dominators;
+
+    Vector<Vector<Inst>> m_insts;
+    Vector<Inst> m_prologue;
+
+    B3::BasicBlock* m_block;
+    bool m_isRare;
+    unsigned m_index;
+    Value* m_value;
+
+    PatchpointSpecial* m_patchpointSpecial { nullptr };
+    HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials;
+
+    Procedure& m_procedure;
+    Code& m_code;
+};
+
+} // anonymous namespace
+
+void lowerToAir(Procedure& procedure)
+{
+    PhaseScope phaseScope(procedure, "lowerToAir");
+    LowerToAir lowerToAir(procedure);
+    lowerToAir.run();
+}
+
+} } // namespace JSC::B3
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3LowerToAir.h b/b3/B3LowerToAir.h
new file mode 100644
index 0000000..a668376
--- /dev/null
+++ b/b3/B3LowerToAir.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+namespace Air { class Code; }
+
+// This lowers the current B3 procedure to an Air code.
+
+JS_EXPORT_PRIVATE void lowerToAir(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3MathExtras.cpp b/b3/B3MathExtras.cpp
new file mode 100644
index 0000000..1c99379
--- /dev/null
+++ b/b3/B3MathExtras.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3MathExtras.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Const32Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstPtrValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "MathCommon.h"
+
+namespace JSC { namespace B3 {
+
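+// Builds IR computing pow(x, y) for a double base and an int32 exponent: small exponents get an
+// inline exponentiation-by-squaring loop, larger ones fall back to a C call to pow(). The result is
+// the continuation block plus the Value holding the computed power. A caller might use it roughly
+// like this (sketch only; "base", "exponent" and "block" are hypothetical):
+//
+//     std::pair<BasicBlock*, Value*> result = powDoubleInt32(proc, block, origin, base, exponent);
+//     block = result.first;
+//     block->appendNew<Value>(proc, Return, origin, result.second);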
+std::pair<BasicBlock*, Value*> powDoubleInt32(Procedure& procedure, BasicBlock* start, Origin origin, Value* x, Value* y)
+{
+    BasicBlock* functionCallCase = procedure.addBlock();
+    BasicBlock* loopPreHeaderCase = procedure.addBlock();
+    BasicBlock* loopTestForEvenCase = procedure.addBlock();
+    BasicBlock* loopOdd = procedure.addBlock();
+    BasicBlock* loopEvenOdd = procedure.addBlock();
+    BasicBlock* continuation = procedure.addBlock();
+
+    Value* shouldGoSlowPath = start->appendNew<Value>(procedure, Above, origin,
+        y,
+        start->appendNew<Const32Value>(procedure, origin, maxExponentForIntegerMathPow));
+    start->appendNew<Value>(procedure, Branch, origin, shouldGoSlowPath);
+    start->setSuccessors(FrequentedBlock(functionCallCase), FrequentedBlock(loopPreHeaderCase));
+
+    // Function call.
+    Value* yAsDouble = functionCallCase->appendNew<Value>(procedure, IToD, origin, y);
+    double (*powDouble)(double, double) = pow;
+    Value* powResult = functionCallCase->appendNew<CCallValue>(
+        procedure, Double, origin,
+        functionCallCase->appendNew<ConstPtrValue>(procedure, origin, bitwise_cast<void*>(powDouble)),
+        x, yAsDouble);
+    UpsilonValue* powResultUpsilon = functionCallCase->appendNew<UpsilonValue>(procedure, origin, powResult);
+    functionCallCase->appendNew<Value>(procedure, Jump, origin);
+    functionCallCase->setSuccessors(FrequentedBlock(continuation));
+
+    // Loop pre-header.
+    Value* initialResult = loopPreHeaderCase->appendNew<ConstDoubleValue>(procedure, origin, 1.);
+    UpsilonValue* initialLoopValue = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, initialResult);
+    UpsilonValue* initialResultValue = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, initialResult);
+    UpsilonValue* initialSquaredInput = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, x);
+    UpsilonValue* initialLoopCounter = loopPreHeaderCase->appendNew<UpsilonValue>(procedure, origin, y);
+    loopPreHeaderCase->appendNew<Value>(procedure, Jump, origin);
+    loopPreHeaderCase->setSuccessors(FrequentedBlock(loopTestForEvenCase));
+
+    // Test if what is left of the counter is even.
+    Value* inLoopCounter = loopTestForEvenCase->appendNew<Value>(procedure, Phi, Int32, origin);
+    Value* inLoopSquaredInput = loopTestForEvenCase->appendNew<Value>(procedure, Phi, Double, origin);
+    Value* lastCounterBit = loopTestForEvenCase->appendNew<Value>(procedure, BitAnd, origin,
+        inLoopCounter,
+        loopTestForEvenCase->appendNew<Const32Value>(procedure, origin, 1));
+    loopTestForEvenCase->appendNew<Value>(procedure, Branch, origin, lastCounterBit);
+    loopTestForEvenCase->setSuccessors(FrequentedBlock(loopOdd), FrequentedBlock(loopEvenOdd));
+
+    // Counter is odd.
+    Value* inLoopResult = loopOdd->appendNew<Value>(procedure, Phi, Double, origin);
+    Value* updatedResult = loopOdd->appendNew<Value>(procedure, Mul, origin, inLoopResult, inLoopSquaredInput);
+    UpsilonValue* updatedLoopResultUpsilon = loopOdd->appendNew<UpsilonValue>(procedure, origin, updatedResult);
+    initialLoopValue->setPhi(inLoopResult);
+    updatedLoopResultUpsilon->setPhi(inLoopResult);
+    UpsilonValue* updatedLoopResult = loopOdd->appendNew<UpsilonValue>(procedure, origin, updatedResult);
+
+    loopOdd->appendNew<Value>(procedure, Jump, origin);
+    loopOdd->setSuccessors(FrequentedBlock(loopEvenOdd));
+
+    // Even value and following the Odd.
+    Value* squaredInput = loopEvenOdd->appendNew<Value>(procedure, Mul, origin, inLoopSquaredInput, inLoopSquaredInput);
+    UpsilonValue* squaredInputUpsilon = loopEvenOdd->appendNew<UpsilonValue>(procedure, origin, squaredInput);
+    initialSquaredInput->setPhi(inLoopSquaredInput);
+    squaredInputUpsilon->setPhi(inLoopSquaredInput);
+
+    Value* updatedCounter = loopEvenOdd->appendNew<Value>(procedure, ZShr, origin,
+        inLoopCounter,
+        loopEvenOdd->appendNew<Const32Value>(procedure, origin, 1));
+    UpsilonValue* updatedCounterUpsilon = loopEvenOdd->appendNew<UpsilonValue>(procedure, origin, updatedCounter);
+    initialLoopCounter->setPhi(inLoopCounter);
+    updatedCounterUpsilon->setPhi(inLoopCounter);
+
+    loopEvenOdd->appendNew<Value>(procedure, Branch, origin, updatedCounter);
+    loopEvenOdd->setSuccessors(FrequentedBlock(loopTestForEvenCase), FrequentedBlock(continuation));
+
+    // Inline loop.
+    Value* finalResultPhi = continuation->appendNew<Value>(procedure, Phi, Double, origin);
+    powResultUpsilon->setPhi(finalResultPhi);
+    initialResultValue->setPhi(finalResultPhi);
+    updatedLoopResult->setPhi(finalResultPhi);
+    return std::make_pair(continuation, finalResultPhi);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3MathExtras.h b/b3/B3MathExtras.h
new file mode 100644
index 0000000..b6bddea
--- /dev/null
+++ b/b3/B3MathExtras.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Procedure;
+class Value;
+
+// Raise "x" to "y" power.
+// Return a new block continuing the flow and the value representing the result.
+JS_EXPORT_PRIVATE std::pair<BasicBlock*, Value*> powDoubleInt32(Procedure&, BasicBlock*, Origin, Value* x, Value* y);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3MemoryValue.cpp b/b3/B3MemoryValue.cpp
new file mode 100644
index 0000000..3764b74
--- /dev/null
+++ b/b3/B3MemoryValue.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3MemoryValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+MemoryValue::~MemoryValue()
+{
+}
+
+size_t MemoryValue::accessByteSize() const
+{
+    switch (opcode()) {
+    case Load8Z:
+    case Load8S:
+    case Store8:
+        return 1;
+    case Load16Z:
+    case Load16S:
+    case Store16:
+        return 2;
+    case Load:
+        return sizeofType(type());
+    case Store:
+        return sizeofType(child(0)->type());
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
+void MemoryValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    if (m_offset)
+        out.print(comma, "offset = ", m_offset);
+    if ((isLoad() && effects().reads != range())
+        || (isStore() && effects().writes != range()))
+        out.print(comma, "range = ", range());
+}
+
+Value* MemoryValue::cloneImpl() const
+{
+    return new MemoryValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3MemoryValue.h b/b3/B3MemoryValue.h
new file mode 100644
index 0000000..9a0504f
--- /dev/null
+++ b/b3/B3MemoryValue.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+// FIXME: We want to allow fenced memory accesses on ARM.
+// https://bugs.webkit.org/show_bug.cgi?id=162349
+
+class JS_EXPORT_PRIVATE MemoryValue : public Value {
+public:
+    static bool accepts(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case Load8Z:
+        case Load8S:
+        case Load16Z:
+        case Load16S:
+        case Load:
+        case Store8:
+        case Store16:
+        case Store:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isStore(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case Store8:
+        case Store16:
+        case Store:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isLoad(Kind kind)
+    {
+        return accepts(kind) && !isStore(kind);
+    }
+
+    ~MemoryValue();
+
+    int32_t offset() const { return m_offset; }
+    void setOffset(int32_t offset) { m_offset = offset; }
+
+    const HeapRange& range() const { return m_range; }
+    void setRange(const HeapRange& range) { m_range = range; }
+
+    bool isStore() const { return type() == Void; }
+    bool isLoad() const { return type() != Void; }
+
+    size_t accessByteSize() const;
+
+protected:
+    void dumpMeta(CommaPrinter& comma, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    // Use this form for Load (but not Load8Z, Load8S, or any of the Loads that have a suffix that
+    // describes the returned type).
+    MemoryValue(Kind kind, Type type, Origin origin, Value* pointer, int32_t offset = 0)
+        : Value(CheckedOpcode, kind, type, origin, pointer)
+        , m_offset(offset)
+        , m_range(HeapRange::top())
+    {
+        if (!ASSERT_DISABLED) {
+            switch (kind.opcode()) {
+            case Load:
+                break;
+            case Load8Z:
+            case Load8S:
+            case Load16Z:
+            case Load16S:
+                ASSERT(type == Int32);
+                break;
+            case Store8:
+            case Store16:
+            case Store:
+                ASSERT(type == Void);
+                break;
+            default:
+                ASSERT_NOT_REACHED();
+            }
+        }
+    }
+
+    // Use this form for loads where the return type is implied.
+    MemoryValue(Kind kind, Origin origin, Value* pointer, int32_t offset = 0)
+        : MemoryValue(kind, Int32, origin, pointer, offset)
+    {
+    }
+
+    // Use this form for stores.
+    MemoryValue(Kind kind, Origin origin, Value* value, Value* pointer, int32_t offset = 0)
+        : Value(CheckedOpcode, kind, Void, origin, value, pointer)
+        , m_offset(offset)
+        , m_range(HeapRange::top())
+    {
+        if (!ASSERT_DISABLED) {
+            switch (kind.opcode()) {
+            case Store8:
+            case Store16:
+            case Store:
+                break;
+            default:
+                ASSERT_NOT_REACHED();
+                break;
+            }
+        }
+    }
+
+    int32_t m_offset { 0 };
+    HeapRange m_range;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3MoveConstants.cpp b/b3/B3MoveConstants.cpp
new file mode 100644
index 0000000..0d98773
--- /dev/null
+++ b/b3/B3MoveConstants.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3MoveConstants.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3ProcedureInlines.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class MoveConstants {
+public:
+    MoveConstants(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+    {
+    }
+
+    void run()
+    {
+        hoistConstants(
+            [&] (const ValueKey& key) -> bool {
+                return key.opcode() == ConstFloat || key.opcode() == ConstDouble;
+            });
+        
+        lowerFPConstants();
+        
+        hoistConstants(
+            [&] (const ValueKey& key) -> bool {
+                return key.opcode() == Const32 || key.opcode() == Const64 || key.opcode() == ArgumentReg;
+            });
+    }
+
+private:
+    template<typename Filter>
+    void hoistConstants(const Filter& filter)
+    {
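+        // For each distinct constant matched by the filter, pick a single canonical Value and
+        // materialize it once, in the lowest-frequency block that dominates all of its uses.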
+        Dominators& dominators = m_proc.dominators();
+        HashMap<ValueKey, Value*> valueForConstant;
+        IndexMap<BasicBlock, Vector<Value*>> materializations(m_proc.size());
+
+        // We determine where things get materialized based on where they are used.
+        for (BasicBlock* block : m_proc) {
+            for (Value* value : *block) {
+                for (Value*& child : value->children()) {
+                    ValueKey key = child->key();
+                    if (!filter(key))
+                        continue;
+
+                    auto result = valueForConstant.add(key, child);
+                    if (result.isNewEntry) {
+                        // Assume that this block is where we want to materialize the value.
+                        child->owner = block;
+                        continue;
+                    }
+
+                    // Make 'value' use the canonical constant rather than the one it was using.
+                    child = result.iterator->value;
+
+                    // Determine the least common dominator. That's the lowest place in the CFG where
+                    // we could materialize the constant while still having only one materialization
+                    // in the resulting code.
+                    while (!dominators.dominates(child->owner, block))
+                        child->owner = dominators.idom(child->owner);
+                }
+            }
+        }
+
+        // Make sure that each basic block knows what to materialize. This also refines the
+        // materialization block based on execution frequency. It finds the minimum block frequency
+        // of all of its dominators, and selects the closest block amongst those that are tied for
+        // lowest frequency.
+        for (auto& entry : valueForConstant) {
+            Value* value = entry.value;
+            for (BasicBlock* block = value->owner; block; block = dominators.idom(block)) {
+                if (block->frequency() < value->owner->frequency())
+                    value->owner = block;
+            }
+            materializations[value->owner].append(value);
+        }
+
+        // Get rid of Value's that are fast constants but aren't canonical. Also remove the canonical
+        // ones from the CFG, since we're going to reinsert them elsewhere.
+        for (BasicBlock* block : m_proc) {
+            for (Value*& value : *block) {
+                ValueKey key = value->key();
+                if (!filter(key))
+                    continue;
+
+                if (valueForConstant.get(key) == value)
+                    value = m_proc.add(Nop, value->origin());
+                else
+                    value->replaceWithNopIgnoringType();
+            }
+        }
+
+        // Now make sure that we move constants to where they are supposed to go. Again, we do this
+        // based on uses.
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                Value* value = block->at(valueIndex);
+                
+                // This finds the outermost (best) block last. So, the functor overrides the result
+                // each time it finds something acceptable.
+                auto findBestConstant = [&] (const auto& predicate) -> Value* {
+                    Value* result = nullptr;
+                    dominators.forAllDominatorsOf(
+                        block,
+                        [&] (BasicBlock* dominator) {
+                            for (Value* value : materializations[dominator]) {
+                                if (predicate(value)) {
+                                    result = value;
+                                    break;
+                                }
+                            }
+                        });
+                    return result;
+                };
+                
+                // We call this when we have found a constant that we'd like to use. It's possible that
+                // we have computed that the constant should be materialized in this block, but we
+                // haven't inserted it yet. This inserts the constant if necessary.
+                auto materialize = [&] (Value* child) {
+                    ValueKey key = child->key();
+                    if (!filter(key))
+                        return;
+
+                    // If we encounter a fast constant, then it must be canonical, since we already
+                    // got rid of the non-canonical ones.
+                    ASSERT(valueForConstant.get(key) == child);
+
+                    if (child->owner != block) {
+                        // This constant isn't our problem. It's going to be materialized in another
+                        // block.
+                        return;
+                    }
+                    
+                    // We're supposed to materialize this constant in this block, and we haven't
+                    // done it yet.
+                    m_insertionSet.insertValue(valueIndex, child);
+                    child->owner = nullptr;
+                };
+                
+                if (MemoryValue* memoryValue = value->as<MemoryValue>()) {
+                    Value* pointer = memoryValue->lastChild();
+                    if (pointer->hasIntPtr() && filter(pointer->key())) {
+                        auto desiredOffset = [&] (Value* otherPointer) -> intptr_t {
+                            // We would turn this:
+                            //
+                            //     Load(@p, offset = c)
+                            //
+                            // into this:
+                            //
+                            //     Load(@q, offset = ?)
+                            //
+                            // The offset should be c + @p - @q, because then we're loading from:
+                            //
+                            //     @q + c + @p - @q
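+                            //
+                            // For example (hypothetical numbers): with @p = 0x1008, c = 16, and
+                            // @q = 0x1000, the new offset is 16 + 0x1008 - 0x1000 = 24, and
+                            // @q + 24 points at @p + 16, as required.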
+                            uintptr_t c = static_cast<uintptr_t>(static_cast<intptr_t>(memoryValue->offset()));
+                            uintptr_t p = pointer->asIntPtr();
+                            uintptr_t q = otherPointer->asIntPtr();
+                            return c + p - q;
+                        };
+                        
+                        Value* bestPointer = findBestConstant(
+                            [&] (Value* candidatePointer) -> bool {
+                                if (!candidatePointer->hasIntPtr())
+                                    return false;
+                                
+                                intptr_t offset = desiredOffset(candidatePointer);
+                                if (!B3::isRepresentableAs<int32_t>(static_cast<int64_t>(offset)))
+                                    return false;
+                                return Air::Arg::isValidAddrForm(
+                                    static_cast<int32_t>(offset),
+                                    Air::Arg::widthForBytes(memoryValue->accessByteSize()));
+                            });
+                        
+                        if (bestPointer) {
+                            memoryValue->lastChild() = bestPointer;
+                            memoryValue->setOffset(desiredOffset(bestPointer));
+                        }
+                    }
+                } else {
+                    switch (value->opcode()) {
+                    case Add:
+                    case Sub: {
+                        Value* addend = value->child(1);
+                        if (!addend->hasInt() || !filter(addend->key()))
+                            break;
+                        int64_t addendConst = addend->asInt();
+                        Value* bestAddend = findBestConstant(
+                            [&] (Value* candidateAddend) -> bool {
+                                if (candidateAddend->type() != addend->type())
+                                    return false;
+                                if (!candidateAddend->hasInt())
+                                    return false;
+                                return candidateAddend == addend
+                                    || candidateAddend->asInt() == -addendConst;
+                            });
+                        if (!bestAddend || bestAddend == addend)
+                            break;
+                        materialize(value->child(0));
+                        materialize(bestAddend);
+                        value->replaceWithIdentity(
+                            m_insertionSet.insert(
+                                valueIndex, value->opcode() == Add ? Sub : Add, value->origin(),
+                                value->child(0), bestAddend));
+                        break;
+                    }
+                    default:
+                        break;
+                    }
+                }
+                
+                for (Value* child : value->children())
+                    materialize(child);
+            }
+
+            // We may have some constants that need to be materialized right at the end of this
+            // block.
+            for (Value* value : materializations[block]) {
+                if (!value->owner) {
+                    // It's already materialized in this block.
+                    continue;
+                }
+
+                m_insertionSet.insertValue(block->size() - 1, value);
+            }
+            m_insertionSet.execute(block);
+        }
+    }
+
+    void lowerFPConstants()
+    {
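+        // Pool every non-zero double/float constant into a shared data section and rewrite its uses
+        // as loads from that table. Constants used by stackmap children with an "Any" constraint are
+        // re-materialized next to the stackmap and left alone, since such uses can encode the
+        // constant directly.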
+        for (Value* value : m_proc.values()) {
+            ValueKey key = value->key();
+            if (goesInTable(key))
+                m_constTable.add(key, m_constTable.size());
+        }
+        
+        m_dataSection = static_cast<int64_t*>(m_proc.addDataSection(m_constTable.size() * sizeof(int64_t)));
+        for (auto& entry : m_constTable)
+            m_dataSection[entry.value] = entry.key.value();
+
+        IndexSet offLimits;
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                StackmapValue* value = block->at(valueIndex)->as<StackmapValue>();
+                if (!value)
+                    continue;
+
+                for (unsigned childIndex = 0; childIndex < value->numChildren(); ++childIndex) {
+                    if (!value->constrainedChild(childIndex).rep().isAny())
+                        continue;
+                    
+                    Value*& child = value->child(childIndex);
+                    ValueKey key = child->key();
+                    if (!goesInTable(key))
+                        continue;
+
+                    child = m_insertionSet.insertValue(
+                        valueIndex, key.materialize(m_proc, value->origin()));
+                    offLimits.add(child);
+                }
+            }
+
+            m_insertionSet.execute(block);
+        }
+
+        for (BasicBlock* block : m_proc) {
+            for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) {
+                Value* value = block->at(valueIndex);
+                ValueKey key = value->key();
+                if (!goesInTable(key))
+                    continue;
+                if (offLimits.contains(value))
+                    continue;
+
+                Value* tableBase = m_insertionSet.insertIntConstant(
+                    valueIndex, value->origin(), pointerType(),
+                    bitwise_cast<intptr_t>(m_dataSection));
+                Value* result = m_insertionSet.insert(
+                    valueIndex, Load, value->type(), value->origin(), tableBase,
+                    sizeof(int64_t) * m_constTable.get(key));
+                value->replaceWithIdentity(result);
+            }
+
+            m_insertionSet.execute(block);
+        }
+    }
+
+    bool goesInTable(const ValueKey& key)
+    {
+        return (key.opcode() == ConstDouble && key != doubleZero())
+            || (key.opcode() == ConstFloat && key != floatZero());
+    }
+
+    static ValueKey doubleZero()
+    {
+        return ValueKey(ConstDouble, Double, 0.0);
+    }
+
+    static ValueKey floatZero()
+    {
+        return ValueKey(ConstFloat, Float, 0.0);
+    }
+
+    Procedure& m_proc;
+    Vector m_toRemove;
+    HashMap<ValueKey, unsigned> m_constTable;
+    int64_t* m_dataSection;
+    HashMap m_constants;
+    InsertionSet m_insertionSet;
+};
+
+} // anonymous namespace
+
+void moveConstants(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "moveConstants");
+    MoveConstants moveConstants(proc);
+    moveConstants.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3MoveConstants.h b/b3/B3MoveConstants.h
new file mode 100644
index 0000000..b9f92ff
--- /dev/null
+++ b/b3/B3MoveConstants.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Moves large constants around, with the goal of placing them in the optimal points in the program.
+
+JS_EXPORT_PRIVATE void moveConstants(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3OpaqueByproduct.h b/b3/B3OpaqueByproduct.h
new file mode 100644
index 0000000..35a2a06
--- /dev/null
+++ b/b3/B3OpaqueByproduct.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+class OpaqueByproduct {
+    WTF_MAKE_NONCOPYABLE(OpaqueByproduct);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    OpaqueByproduct() { }
+    virtual ~OpaqueByproduct() { }
+
+    virtual void dump(PrintStream&) const = 0;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3OpaqueByproducts.cpp b/b3/B3OpaqueByproducts.cpp
new file mode 100644
index 0000000..f89f8bf
--- /dev/null
+++ b/b3/B3OpaqueByproducts.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3OpaqueByproducts.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+OpaqueByproducts::OpaqueByproducts()
+{
+}
+
+OpaqueByproducts::~OpaqueByproducts()
+{
+}
+
+void OpaqueByproducts::add(std::unique_ptr<OpaqueByproduct> byproduct)
+{
+    m_byproducts.append(WTFMove(byproduct));
+}
+
+void OpaqueByproducts::dump(PrintStream& out) const
+{
+    out.print("Byproducts:\n");
+    if (m_byproducts.isEmpty()) {
+        out.print("    <empty>\n");
+        return;
+    }
+    for (auto& byproduct : m_byproducts)
+        out.print("    ", *byproduct, "\n");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3OpaqueByproducts.h b/b3/B3OpaqueByproducts.h
new file mode 100644
index 0000000..e8eec11
--- /dev/null
+++ b/b3/B3OpaqueByproducts.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproduct.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+class OpaqueByproducts {
+    WTF_MAKE_NONCOPYABLE(OpaqueByproducts)
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    OpaqueByproducts();
+    JS_EXPORT_PRIVATE ~OpaqueByproducts();
+
+    size_t count() const { return m_byproducts.size(); }
+    
+    void add(std::unique_ptr<OpaqueByproduct>);
+
+    void dump(PrintStream&) const;
+
+private:
+    Vector<std::unique_ptr<OpaqueByproduct>> m_byproducts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
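
For context (not part of this patch): a concrete byproduct only needs to own some allocation for the lifetime of the compilation and know how to dump itself. A minimal, hedged sketch follows; the subclass name and its buffer member are assumptions, while OpaqueByproduct, PrintStream, and RawPointer are used as in the files above.

// Hypothetical example subclass keeping a heap buffer alive until the Procedure dies.
class OwnedBufferByproduct : public OpaqueByproduct {
public:
    explicit OwnedBufferByproduct(std::unique_ptr<uint64_t[]> buffer)
        : m_buffer(WTFMove(buffer))
    {
    }

    void dump(PrintStream& out) const override
    {
        out.print("OwnedBuffer at ", RawPointer(m_buffer.get()));
    }

private:
    std::unique_ptr<uint64_t[]> m_buffer;
};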
diff --git a/b3/B3Opcode.cpp b/b3/B3Opcode.cpp
new file mode 100644
index 0000000..a0aa5a9
--- /dev/null
+++ b/b3/B3Opcode.cpp
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Opcode.h"
+
+#if ENABLE(B3_JIT)
+
+#include 
+
+namespace JSC { namespace B3 {
+
+std::optional<Opcode> invertedCompare(Opcode opcode, Type type)
+{
+    switch (opcode) {
+    case Equal:
+        return NotEqual;
+    case NotEqual:
+        return Equal;
+    case LessThan:
+        if (isInt(type))
+            return GreaterEqual;
+        return std::nullopt;
+    case GreaterThan:
+        if (isInt(type))
+            return LessEqual;
+        return std::nullopt;
+    case LessEqual:
+        if (isInt(type))
+            return GreaterThan;
+        return std::nullopt;
+    case GreaterEqual:
+        if (isInt(type))
+            return LessThan;
+        return std::nullopt;
+    case Above:
+        return BelowEqual;
+    case Below:
+        return AboveEqual;
+    case AboveEqual:
+        return Below;
+    case BelowEqual:
+        return Above;
+    default:
+        return std::nullopt;
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Opcode opcode)
+{
+    switch (opcode) {
+    case Nop:
+        out.print("Nop");
+        return;
+    case Identity:
+        out.print("Identity");
+        return;
+    case Const32:
+        out.print("Const32");
+        return;
+    case Const64:
+        out.print("Const64");
+        return;
+    case ConstDouble:
+        out.print("ConstDouble");
+        return;
+    case ConstFloat:
+        out.print("ConstFloat");
+        return;
+    case Get:
+        out.print("Get");
+        return;
+    case Set:
+        out.print("Set");
+        return;
+    case SlotBase:
+        out.print("SlotBase");
+        return;
+    case ArgumentReg:
+        out.print("ArgumentReg");
+        return;
+    case FramePointer:
+        out.print("FramePointer");
+        return;
+    case Add:
+        out.print("Add");
+        return;
+    case Sub:
+        out.print("Sub");
+        return;
+    case Mul:
+        out.print("Mul");
+        return;
+    case Div:
+        out.print("Div");
+        return;
+    case UDiv:
+        out.print("UDiv");
+        return;
+    case Mod:
+        out.print("Mod");
+        return;
+    case UMod:
+        out.print("UMod");
+        return;
+    case Neg:
+        out.print("Neg");
+        return;
+    case BitAnd:
+        out.print("BitAnd");
+        return;
+    case BitOr:
+        out.print("BitOr");
+        return;
+    case BitXor:
+        out.print("BitXor");
+        return;
+    case Shl:
+        out.print("Shl");
+        return;
+    case SShr:
+        out.print("SShr");
+        return;
+    case ZShr:
+        out.print("ZShr");
+        return;
+    case RotR:
+        out.print("RotR");
+        return;
+    case RotL:
+        out.print("RotL");
+        return;
+    case Clz:
+        out.print("Clz");
+        return;
+    case Abs:
+        out.print("Abs");
+        return;
+    case Ceil:
+        out.print("Ceil");
+        return;
+    case Floor:
+        out.print("Floor");
+        return;
+    case Sqrt:
+        out.print("Sqrt");
+        return;
+    case BitwiseCast:
+        out.print("BitwiseCast");
+        return;
+    case SExt8:
+        out.print("SExt8");
+        return;
+    case SExt16:
+        out.print("SExt16");
+        return;
+    case SExt32:
+        out.print("SExt32");
+        return;
+    case ZExt32:
+        out.print("ZExt32");
+        return;
+    case Trunc:
+        out.print("Trunc");
+        return;
+    case IToD:
+        out.print("IToD");
+        return;
+    case IToF:
+        out.print("IToF");
+        return;
+    case FloatToDouble:
+        out.print("FloatToDouble");
+        return;
+    case DoubleToFloat:
+        out.print("DoubleToFloat");
+        return;
+    case Equal:
+        out.print("Equal");
+        return;
+    case NotEqual:
+        out.print("NotEqual");
+        return;
+    case LessThan:
+        out.print("LessThan");
+        return;
+    case GreaterThan:
+        out.print("GreaterThan");
+        return;
+    case LessEqual:
+        out.print("LessEqual");
+        return;
+    case GreaterEqual:
+        out.print("GreaterEqual");
+        return;
+    case Above:
+        out.print("Above");
+        return;
+    case Below:
+        out.print("Below");
+        return;
+    case AboveEqual:
+        out.print("AboveEqual");
+        return;
+    case BelowEqual:
+        out.print("BelowEqual");
+        return;
+    case EqualOrUnordered:
+        out.print("EqualOrUnordered");
+        return;
+    case Select:
+        out.print("Select");
+        return;
+    case Load8Z:
+        out.print("Load8Z");
+        return;
+    case Load8S:
+        out.print("Load8S");
+        return;
+    case Load16Z:
+        out.print("Load16Z");
+        return;
+    case Load16S:
+        out.print("Load16S");
+        return;
+    case Load:
+        out.print("Load");
+        return;
+    case Store8:
+        out.print("Store8");
+        return;
+    case Store16:
+        out.print("Store16");
+        return;
+    case Store:
+        out.print("Store");
+        return;
+    case WasmAddress:
+        out.print("WasmAddress");
+        return;
+    case Fence:
+        out.print("Fence");
+        return;
+    case CCall:
+        out.print("CCall");
+        return;
+    case Patchpoint:
+        out.print("Patchpoint");
+        return;
+    case CheckAdd:
+        out.print("CheckAdd");
+        return;
+    case CheckSub:
+        out.print("CheckSub");
+        return;
+    case CheckMul:
+        out.print("CheckMul");
+        return;
+    case Check:
+        out.print("Check");
+        return;
+    case WasmBoundsCheck:
+        out.print("WasmBoundsCheck");
+        return;
+    case Upsilon:
+        out.print("Upsilon");
+        return;
+    case Phi:
+        out.print("Phi");
+        return;
+    case Jump:
+        out.print("Jump");
+        return;
+    case Branch:
+        out.print("Branch");
+        return;
+    case Switch:
+        out.print("Switch");
+        return;
+    case EntrySwitch:
+        out.print("EntrySwitch");
+        return;
+    case Return:
+        out.print("Return");
+        return;
+    case Oops:
+        out.print("Oops");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Opcode.h b/b3/B3Opcode.h
new file mode 100644
index 0000000..956dba9
--- /dev/null
+++ b/b3/B3Opcode.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Type.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+// Warning: In B3, an Opcode is just one part of a Kind. Kind is used the way that an opcode
+// would be used in simple IRs. See B3Kind.h.
+
+enum Opcode : int16_t {
+    // A no-op that returns Void, useful for when you want to remove a value.
+    Nop,
+    
+    // Polymorphic identity, usable with any value type.
+    Identity,
+
+    // Constants. Use the ConstValue* classes. Constants exist in the control flow, so that we can
+    // reason about where we would construct them. Large constants are expensive to create.
+    Const32,
+    Const64,
+    ConstDouble,
+    ConstFloat,
+
+    // B3 supports non-SSA variables. These are accessed using Get and Set opcodes. Use the
+    // VariableValue class. It's a good idea to run fixSSA() to turn these into SSA. The
+    // optimizer will do that eventually, but if your input tends to use these opcodes, you
+    // should run fixSSA() directly before launching the optimizer.
+    Set,
+    Get,
+
+    // Gets the base address of a StackSlot.
+    SlotBase,
+
+    // The magical argument register. This is viewed as executing at the top of the program
+    // regardless of where in control flow you put it, and the compiler takes care to ensure that we
+    // don't clobber the value by register allocation or calls (either by saving the argument to the
+    // stack or preserving it in a callee-save register). Use the ArgumentRegValue class. The return
+    // type is either pointer() (for GPRs) or Double (for FPRs).
+    ArgumentReg,
+
+    // The frame pointer. You can put this anywhere in control flow but it will always yield the
+    // frame pointer, with a caveat: if our compiler changes the frame pointer temporarily for some
+    // silly reason, the FramePointer intrinsic will return where the frame pointer *should* be not
+    // where it happens to be right now.
+    FramePointer,
+
+    // Polymorphic math, usable with any value type.
+    Add,
+    Sub,
+    Mul,
+    Div, // All bets are off as to what will happen when you execute this for -2^31/-1 and x/0.
+    UDiv,
+    Mod, // All bets are off as to what will happen when you execute this for -2^31%-1 and x%0.
+    UMod,
+
+
+    // Polymorphic negation. Note that we only need this for floating point, since integer negation
+    // is exactly like Sub(0, x). But that's not true for floating point. Sub(0, 0) is 0, while
+    // Neg(0) is -0. Also, we canonicalize Sub(0, x) into Neg(x) for integers.
+    Neg,
+
+    // Integer math.
+    BitAnd,
+    BitOr,
+    BitXor,
+    Shl,
+    SShr, // Arithmetic Shift.
+    ZShr, // Logical Shift.
+    RotR, // Rotate Right.
+    RotL, // Rotate Left.
+    Clz, // Count leading zeros.
+
+    // Floating point math.
+    Abs,
+    Ceil,
+    Floor,
+    Sqrt,
+
+    // Casts and such.
+    // Bitwise Cast of Double->Int64 or Int64->Double
+    BitwiseCast,
+    // Takes and returns Int32:
+    SExt8,
+    SExt16,
+    // Takes Int32 and returns Int64:
+    SExt32,
+    ZExt32,
+    // Does a bitwise truncation of Int64->Int32 and Double->Float:
+    Trunc,
+    // Takes an integer and returns a floating point value. Note that we don't currently provide the opposite operation,
+    // because double-to-int conversions have weirdly different semantics on different platforms. Use
+    // a patchpoint if you need to do that.
+    IToD,
+    IToF,
+    // Convert between double and float.
+    FloatToDouble,
+    DoubleToFloat,
+
+    // Polymorphic comparisons, usable with any value type. Returns int32 0 or 1. Note that "Not"
+    // is just Equal(x, 0), and "ToBoolean" is just NotEqual(x, 0).
+    Equal,
+    NotEqual,
+    LessThan,
+    GreaterThan,
+    LessEqual,
+    GreaterEqual,
+
+    // Integer comparisons. Returns int32 0 or 1.
+    Above,
+    Below,
+    AboveEqual,
+    BelowEqual,
+
+    // Unordered floating point compare: values are equal or either one is NaN.
+    EqualOrUnordered,
+
+    // SSA form of conditional move. The first child is evaluated for truthiness. If true, the second child
+    // is returned. Otherwise, the third child is returned.
+    Select,
+
+    // Memory loads. Opcode indicates how we load and the loaded type. These use MemoryValue.
+    // These return Int32:
+    Load8Z,
+    Load8S,
+    Load16Z,
+    Load16S,
+    // This returns whatever the return type is:
+    Load,
+
+    // Memory stores. Opcode indicates how the value is stored. These use MemoryValue.
+    // These take an Int32 value:
+    Store8,
+    Store16,
+    // This is a polymorphic store for Int32, Int64, Float, and Double.
+    Store,
+
+    // This is used to compute the actual address of a Wasm memory operation. It takes an IntPtr
+    // and a pinned register then computes the appropriate IntPtr address. For the use-case of
+    // Wasm it is important that the first child initially be a ZExt32 so the top bits are cleared.
+    // We do WasmAddress(ZExt32(ptr), ...) so that we can avoid generating extraneous moves in Air.
+    WasmAddress,
+    
+    // This is used to represent standalone fences - i.e. fences that are not part of other
+    // instructions. It's expressive enough to expose mfence on x86 and dmb ish/ishst on ARM. On
+    // x86, it also acts as a compiler store-store fence in those cases where it would have been a
+    // dmb ishst on ARM.
+    Fence,
+
+    // This is a regular ordinary C function call, using the system C calling convention. Make sure
+    // that the arguments are passed using the right types. The first argument is the callee.
+    CCall,
+
+    // This is a patchpoint. Use the PatchpointValue class. This is viewed as behaving like a call,
+    // but only emits code via a code generation callback. That callback gets to emit code inline.
+    // You can pass a stackmap along with constraints on how each stackmap argument must be passed.
+    // It's legal to request that a stackmap argument is in some register and it's legal to request
+    // that a stackmap argument is at some offset from the top of the argument passing area on the
+    // stack.
+    Patchpoint,
+
+    // Checked math. Use the CheckValue class. Like a Patchpoint, this takes a code generation
+    // callback. That callback gets to emit some code after the epilogue, and gets to link the jump
+    // from the check, and the choice of registers. You also get to supply a stackmap. Note that you
+    // are not allowed to jump back into the mainline code from your slow path, since the compiler
+    // will assume that the execution of these instructions proves that overflow didn't happen. For
+    // example, if you have two CheckAdd's:
+    //
+    // a = CheckAdd(x, y)
+    // b = CheckAdd(x, y)
+    //
+    // Then it's valid to change this to:
+    //
+    // a = CheckAdd(x, y)
+    // b = Identity(a)
+    //
+    // This is valid regardless of the callbacks used by the two CheckAdds. They may have different
+    // callbacks. Yet, this transformation is valid even if they are different because we know that
+    // after the first CheckAdd executes, the second CheckAdd could not have possibly taken slow
+    // path. Therefore, the second CheckAdd's callback is irrelevant.
+    //
+    // Note that the first two children of these operations have ValueRep's as input constraints but do
+    // not have output constraints.
+    CheckAdd,
+    CheckSub,
+    CheckMul,
+
+    // Check that side-exits. Use the CheckValue class. Like CheckAdd and friends, this has a
+    // stackmap with a generation callback. This takes an int argument that this branches on, with
+    // full branch fusion in the instruction selector. A true value jumps to the generator's slow
+    // path. Note that the predicate child has an input ValueRep. The input constraint must be
+    // WarmAny. It will not have an output constraint.
+    Check,
+
+    // Special Wasm opcode that takes an Int32, a special pinned GPR, and an offset. This node exists
+    // to allow us to CSE WasmBoundsChecks if both use the same pointer and one dominates the other.
+    // Without some such node B3 would not have enough information about the inner workings of wasm
+    // to be able to perform such optimizations.
+    WasmBoundsCheck,
+
+    // SSA support, in the style of DFG SSA.
+    Upsilon, // This uses the UpsilonValue class.
+    Phi,
+
+    // Jump.
+    Jump,
+    
+    // Polymorphic branch, usable with any integer type. Branches if not equal to zero. The 0-index
+    // successor is the true successor.
+    Branch,
+
+    // Switch. Switches over either Int32 or Int64. Uses the SwitchValue class.
+    Switch,
+    
+    // Multiple entrypoints are supported via the EntrySwitch operation. Place this in the root
+    // block and list the entrypoints as the successors. All blocks backwards-reachable from
+    // EntrySwitch are duplicated for each entrypoint.
+    EntrySwitch,
+
+    // Return. Note that B3 procedures don't know their return type, so this can just return any
+    // type.
+    Return,
+
+    // This is a terminal that indicates that we will never get here.
+    Oops
+};
+
+inline bool isCheckMath(Opcode opcode)
+{
+    switch (opcode) {
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return true;
+    default:
+        return false;
+    }
+}
+
+std::optional<Opcode> invertedCompare(Opcode, Type);
+
+inline Opcode constPtrOpcode()
+{
+    if (is64Bit())
+        return Const64;
+    return Const32;
+}
+
+inline bool isConstant(Opcode opcode)
+{
+    switch (opcode) {
+    case Const32:
+    case Const64:
+    case ConstDouble:
+    case ConstFloat:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isDefinitelyTerminal(Opcode opcode)
+{
+    switch (opcode) {
+    case Jump:
+    case Branch:
+    case Switch:
+    case Oops:
+    case Return:
+        return true;
+    default:
+        return false;
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Opcode);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
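
As a side note (not part of the patch), invertedCompare() is the kind of helper a branch-inversion rewrite would reach for. A hedged sketch, assuming the Value accessors (opcode(), child(), type()) that live in B3Value.h elsewhere in the tree:

// Sketch: returns the opcode to use if we want Branch(compare, A, B) to become
// Branch(inverted(compare), B, A); nullopt means the comparison cannot be safely
// inverted (e.g. floating-point LessThan, where inversion would change NaN behavior).
std::optional<Opcode> opcodeForSwappedBranch(Value* compare)
{
    return invertedCompare(compare->opcode(), compare->child(0)->type());
}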
diff --git a/b3/B3Origin.cpp b/b3/B3Origin.cpp
new file mode 100644
index 0000000..8baf012
--- /dev/null
+++ b/b3/B3Origin.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Origin.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+void Origin::dump(PrintStream& out) const
+{
+    out.print("Origin(", RawPointer(m_data), ")");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Origin.h b/b3/B3Origin.h
new file mode 100644
index 0000000..47fd10f
--- /dev/null
+++ b/b3/B3Origin.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include 
+
+namespace JSC { namespace B3 {
+
+// Whoever generates B3IR can choose to put origins on values. When you do this, B3 will be able to
+// track, down to the machine code, which instruction corresponds to which origin. B3
+// transformations must preserve Origins carefully. It's an error to write a transformation that
+// either drops Origins or lies about them.
+class Origin {
+public:
+    explicit Origin(const void* data = nullptr)
+        : m_data(data)
+    {
+    }
+
+    explicit operator bool() const { return !!m_data; }
+
+    const void* data() const { return m_data; }
+
+    bool operator==(const Origin& other) const { return m_data == other.m_data; }
+
+    // You should avoid using this. Use OriginDump instead.
+    void dump(PrintStream&) const;
+    
+private:
+    const void* m_data;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
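
For orientation (an assumption, not part of the patch): an Origin is just an opaque pointer whose meaning belongs to the client of B3. A minimal sketch of how a frontend might use it, where MyBytecode is a hypothetical client-side type:

// The frontend owns the meaning of the pointer; B3 only carries it around.
struct MyBytecode; // hypothetical frontend IR node

JSC::B3::Origin originFor(const MyBytecode* node)
{
    return JSC::B3::Origin(node); // wraps the node as the opaque data pointer
}

const MyBytecode* nodeFor(JSC::B3::Origin origin)
{
    return static_cast<const MyBytecode*>(origin.data());
}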
diff --git a/b3/B3OriginDump.cpp b/b3/B3OriginDump.cpp
new file mode 100644
index 0000000..da7afee
--- /dev/null
+++ b/b3/B3OriginDump.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3OriginDump.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+void OriginDump::dump(PrintStream& out) const
+{
+    if (m_proc)
+        m_proc->printOrigin(out, m_origin);
+    else
+        out.print(m_origin);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3OriginDump.h b/b3/B3OriginDump.h
new file mode 100644
index 0000000..5392ac9
--- /dev/null
+++ b/b3/B3OriginDump.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class OriginDump {
+public:
+    OriginDump(const Procedure* proc, Origin origin)
+        : m_proc(proc)
+        , m_origin(origin)
+    {
+    }
+
+    void dump(PrintStream& out) const;
+
+private:
+    const Procedure* m_proc;
+    Origin m_origin;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PCToOriginMap.h b/b3/B3PCToOriginMap.h
new file mode 100644
index 0000000..5e6ce45
--- /dev/null
+++ b/b3/B3PCToOriginMap.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Origin.h"
+#include "MacroAssembler.h"
+#include 
+
+namespace JSC { namespace B3 {
+
+class PCToOriginMap {
+    WTF_MAKE_NONCOPYABLE(PCToOriginMap);
+public:
+    PCToOriginMap()
+    { }
+
+    PCToOriginMap(PCToOriginMap&& other)
+        : m_ranges(WTFMove(other.m_ranges))
+    { }
+
+    struct OriginRange {
+        MacroAssembler::Label label;
+        Origin origin;
+    };
+
+    void appendItem(MacroAssembler::Label label, Origin origin)
+    {
+        if (m_ranges.size()) {
+            if (m_ranges.last().label == label)
+                return;
+        }
+
+        m_ranges.append(OriginRange{label, origin});
+    }
+
+    const Vector<OriginRange>& ranges() const { return m_ranges; }
+
+private:
+    Vector<OriginRange> m_ranges;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
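
For orientation (assumed usage, not part of the patch): the map is meant to be appended to as machine code is emitted, one label per origin transition; appendItem() already coalesces consecutive items that share a label, so callers can record unconditionally.

// Sketch (assumed to live inside namespace JSC::B3): record the origin in
// effect right before emitting code for a value.
void noteOrigin(PCToOriginMap& map, MacroAssembler& jit, Origin origin)
{
    map.appendItem(jit.label(), origin);
}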
diff --git a/b3/B3PatchpointSpecial.cpp b/b3/B3PatchpointSpecial.cpp
new file mode 100644
index 0000000..c5fc588
--- /dev/null
+++ b/b3/B3PatchpointSpecial.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PatchpointSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirGenerationContext.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+PatchpointSpecial::PatchpointSpecial()
+{
+}
+
+PatchpointSpecial::~PatchpointSpecial()
+{
+}
+
+void PatchpointSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    PatchpointValue* patchpoint = inst.origin->as<PatchpointValue>();
+    unsigned argIndex = 1;
+
+    if (patchpoint->type() != Void) {
+        Arg::Role role;
+        if (patchpoint->resultConstraint.kind() == ValueRep::SomeEarlyRegister)
+            role = Arg::EarlyDef;
+        else
+            role = Arg::Def;
+        
+        callback(inst.args[argIndex++], role, inst.origin->airType(), inst.origin->airWidth());
+    }
+
+    forEachArgImpl(0, argIndex, inst, SameAsRep, std::nullopt, callback);
+    argIndex += inst.origin->numChildren();
+
+    for (unsigned i = patchpoint->numGPScratchRegisters; i--;)
+        callback(inst.args[argIndex++], Arg::Scratch, Arg::GP, Arg::conservativeWidth(Arg::GP));
+    for (unsigned i = patchpoint->numFPScratchRegisters; i--;)
+        callback(inst.args[argIndex++], Arg::Scratch, Arg::FP, Arg::conservativeWidth(Arg::FP));
+}
+
+bool PatchpointSpecial::isValid(Inst& inst)
+{
+    PatchpointValue* patchpoint = inst.origin->as<PatchpointValue>();
+    unsigned argIndex = 1;
+
+    if (inst.origin->type() != Void) {
+        if (argIndex >= inst.args.size())
+            return false;
+        
+        if (!isArgValidForValue(inst.args[argIndex], patchpoint))
+            return false;
+        if (!isArgValidForRep(code(), inst.args[argIndex], patchpoint->resultConstraint))
+            return false;
+        argIndex++;
+    }
+
+    if (!isValidImpl(0, argIndex, inst))
+        return false;
+    argIndex += patchpoint->numChildren();
+
+    if (argIndex + patchpoint->numGPScratchRegisters + patchpoint->numFPScratchRegisters
+        != inst.args.size())
+        return false;
+
+    for (unsigned i = patchpoint->numGPScratchRegisters; i--;) {
+        Arg arg = inst.args[argIndex++];
+        if (!arg.isGPTmp())
+            return false;
+    }
+    for (unsigned i = patchpoint->numFPScratchRegisters; i--;) {
+        Arg arg = inst.args[argIndex++];
+        if (!arg.isFPTmp())
+            return false;
+    }
+
+    return true;
+}
+
+bool PatchpointSpecial::admitsStack(Inst& inst, unsigned argIndex)
+{
+    if (inst.origin->type() == Void)
+        return admitsStackImpl(0, 1, inst, argIndex);
+
+    if (argIndex == 1) {
+        switch (inst.origin->as<PatchpointValue>()->resultConstraint.kind()) {
+        case ValueRep::WarmAny:
+        case ValueRep::StackArgument:
+            return true;
+        case ValueRep::SomeRegister:
+        case ValueRep::SomeEarlyRegister:
+        case ValueRep::Register:
+        case ValueRep::LateRegister:
+            return false;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return false;
+        }
+    }
+
+    return admitsStackImpl(0, 2, inst, argIndex);
+}
+
+CCallHelpers::Jump PatchpointSpecial::generate(
+    Inst& inst, CCallHelpers& jit, GenerationContext& context)
+{
+    PatchpointValue* value = inst.origin->as<PatchpointValue>();
+    ASSERT(value);
+
+    Vector<ValueRep> reps;
+    unsigned offset = 1;
+    if (inst.origin->type() != Void)
+        reps.append(repForArg(*context.code, inst.args[offset++]));
+    reps.appendVector(repsImpl(context, 0, offset, inst));
+    offset += value->numChildren();
+
+    StackmapGenerationParams params(value, reps, context);
+
+    for (unsigned i = value->numGPScratchRegisters; i--;)
+        params.m_gpScratch.append(inst.args[offset++].gpr());
+    for (unsigned i = value->numFPScratchRegisters; i--;)
+        params.m_fpScratch.append(inst.args[offset++].fpr());
+    
+    value->m_generator->run(jit, params);
+
+    return CCallHelpers::Jump();
+}
+
+bool PatchpointSpecial::isTerminal(Inst& inst)
+{
+    return inst.origin->as<PatchpointValue>()->effects.terminal;
+}
+
+void PatchpointSpecial::dumpImpl(PrintStream& out) const
+{
+    out.print("Patchpoint");
+}
+
+void PatchpointSpecial::deepDumpImpl(PrintStream& out) const
+{
+    out.print("Lowered B3::PatchpointValue.");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PatchpointSpecial.h b/b3/B3PatchpointSpecial.h
new file mode 100644
index 0000000..4e1b2a3
--- /dev/null
+++ b/b3/B3PatchpointSpecial.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackmapSpecial.h"
+
+namespace JSC { namespace B3 {
+
+// This is a special that recognizes that there are two uses of Patchpoint: Void and non-Void.
+// In the Void case, the syntax of the Air Patch instruction is:
+//
+//     Patch &patchpoint, args...
+//
+// Where "args..." are the lowered arguments to the Patchpoint instruction. In the non-Void case
+// we will have:
+//
+//     Patch &patchpoint, result, args...
+
+class PatchpointSpecial : public StackmapSpecial {
+public:
+    PatchpointSpecial();
+    virtual ~PatchpointSpecial();
+
+protected:
+    void forEachArg(Air::Inst&, const ScopedLambda<Air::Inst::EachArgCallback>&) override;
+    bool isValid(Air::Inst&) override;
+    bool admitsStack(Air::Inst&, unsigned argIndex) override;
+
+    // NOTE: the generate method will generate the hidden branch and then register a LatePath that
+    // generates the stackmap. Super crazy dude!
+
+    CCallHelpers::Jump generate(Air::Inst&, CCallHelpers&, Air::GenerationContext&) override;
+    
+    bool isTerminal(Air::Inst&) override;
+
+    void dumpImpl(PrintStream&) const override;
+    void deepDumpImpl(PrintStream&) const override;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PatchpointValue.cpp b/b3/B3PatchpointValue.cpp
new file mode 100644
index 0000000..b33c558
--- /dev/null
+++ b/b3/B3PatchpointValue.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PatchpointValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+PatchpointValue::~PatchpointValue()
+{
+}
+
+void PatchpointValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    Base::dumpMeta(comma, out);
+    out.print(comma, "resultConstraint = ", resultConstraint);
+    if (numGPScratchRegisters)
+        out.print(comma, "numGPScratchRegisters = ", numGPScratchRegisters);
+    if (numFPScratchRegisters)
+        out.print(comma, "numFPScratchRegisters = ", numFPScratchRegisters);
+}
+
+Value* PatchpointValue::cloneImpl() const
+{
+    return new PatchpointValue(*this);
+}
+
+PatchpointValue::PatchpointValue(Type type, Origin origin)
+    : Base(CheckedOpcode, Patchpoint, type, origin)
+    , effects(Effects::forCall())
+    , resultConstraint(type == Void ? ValueRep::WarmAny : ValueRep::SomeRegister)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PatchpointValue.h b/b3/B3PatchpointValue.h
new file mode 100644
index 0000000..3378dc4
--- /dev/null
+++ b/b3/B3PatchpointValue.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Effects.h"
+#include "B3StackmapValue.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class PatchpointValue : public StackmapValue {
+public:
+    typedef StackmapValue Base;
+    
+    static bool accepts(Kind kind) { return kind == Patchpoint; }
+
+    ~PatchpointValue();
+
+    // The effects of the patchpoint. This defaults to Effects::forCall(), but you can set it to anything.
+    //
+    // If there are no effects, B3 is free to assume any use of this PatchpointValue can be replaced with
+    // a use of a different PatchpointValue, so long as the other one also has no effects and has the
+    // same children. Note that this comparison ignores child constraints, the result constraint, and all
+    // other StackmapValue meta-data. If there are read effects but not write effects, then this same sort
+    // of substitution could be made so long as there are no interfering writes.
+    Effects effects;
+
+    // The input representation (i.e. constraint) of the return value. This defaults to WarmAny if the
+    // type is Void and it defaults to SomeRegister otherwise. It's illegal to mess with this if the type
+    // is Void. Otherwise you can set this to any input constraint.
+    ValueRep resultConstraint;
+
+    // The number of scratch registers that this patchpoint gets. The scratch register is guaranteed
+    // to be different from any input register and the destination register. It's also guaranteed not
+    // to be clobbered either early or late. These are 0 by default.
+    uint8_t numGPScratchRegisters { 0 };
+    uint8_t numFPScratchRegisters { 0 };
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    JS_EXPORT_PRIVATE PatchpointValue(Type, Origin);
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
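
To make the knobs above concrete, here is a hedged sketch of building a patchpoint in the style of B3's own tests. Procedure, BasicBlock, ConstrainedValue, and setGenerator() come from other files that are not part of this hunk, and proc, block, and input are assumed to have been set up by the caller.

// Sketch: an Int32 patchpoint that consumes one value in some register and
// increments it into whatever register the allocator picks for the result.
PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, Int32, Origin());
patchpoint->append(ConstrainedValue(input, ValueRep::SomeRegister));
patchpoint->setGenerator([] (CCallHelpers& jit, const StackmapGenerationParams& params) {
    // params[0] is the result's rep, params[1] is the rep of the appended child.
    jit.move(params[1].gpr(), params[0].gpr());
    jit.add32(CCallHelpers::TrustedImm32(1), params[0].gpr());
});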
diff --git a/b3/B3PhaseScope.cpp b/b3/B3PhaseScope.cpp
new file mode 100644
index 0000000..27b22de
--- /dev/null
+++ b/b3/B3PhaseScope.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PhaseScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include "B3Procedure.h"
+#include "B3Validate.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+PhaseScope::PhaseScope(Procedure& procedure, const char* name)
+    : m_procedure(procedure)
+    , m_name(name)
+    , m_timingScope(name)
+{
+    if (shouldDumpIRAtEachPhase(B3Mode)) {
+        dataLog("B3 after ", procedure.lastPhaseName(), ", before ", name, ":\n");
+        dataLog(procedure);
+    }
+
+    if (shouldSaveIRBeforePhase())
+        m_dumpBefore = toCString(procedure);
+}
+
+PhaseScope::~PhaseScope()
+{
+    m_procedure.setLastPhaseName(m_name);
+    if (shouldValidateIRAtEachPhase())
+        validate(m_procedure, m_dumpBefore.data());
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PhaseScope.h b/b3/B3PhaseScope.h
new file mode 100644
index 0000000..a176988
--- /dev/null
+++ b/b3/B3PhaseScope.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3TimingScope.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class PhaseScope {
+    WTF_MAKE_NONCOPYABLE(PhaseScope);
+public:
+    PhaseScope(Procedure&, const char* name);
+    ~PhaseScope(); // this does validation
+
+private:
+    Procedure& m_procedure;
+    const char* m_name;
+    TimingScope m_timingScope;
+    CString m_dumpBefore;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PhiChildren.cpp b/b3/B3PhiChildren.cpp
new file mode 100644
index 0000000..3b9b4e2
--- /dev/null
+++ b/b3/B3PhiChildren.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PhiChildren.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+PhiChildren::PhiChildren(Procedure& proc)
+    : m_upsilons(proc.values().size())
+{
+    for (Value* value : proc.values()) {
+        if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+            Value* phi = upsilon->phi();
+            Vector& vector = m_upsilons[phi];
+            if (vector.isEmpty())
+                m_phis.append(phi);
+            vector.append(upsilon);
+        }
+    }
+}
+
+PhiChildren::~PhiChildren()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3PhiChildren.h b/b3/B3PhiChildren.h
new file mode 100644
index 0000000..22b8277
--- /dev/null
+++ b/b3/B3PhiChildren.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include "B3UpsilonValue.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class PhiChildren {
+public:
+    PhiChildren(Procedure&);
+    ~PhiChildren();
+
+    class ValueCollection {
+    public:
+        ValueCollection(Vector<UpsilonValue*, 8>* values = nullptr)
+            : m_values(values)
+        {
+        }
+
+        unsigned size() const { return m_values->size(); }
+        Value* at(unsigned index) const { return m_values->at(index)->child(0); }
+        Value* operator[](unsigned index) const { return at(index); }
+
+        bool contains(Value* value) const
+        {
+            for (unsigned i = size(); i--;) {
+                if (at(i) == value)
+                    return true;
+            }
+            return false;
+        }
+
+        class iterator {
+        public:
+            iterator(Vector<UpsilonValue*, 8>* values = nullptr, unsigned index = 0)
+                : m_values(values)
+                , m_index(index)
+            {
+            }
+
+            Value* operator*() const
+            {
+                return m_values->at(m_index)->child(0);
+            }
+
+            iterator& operator++()
+            {
+                m_index++;
+                return *this;
+            }
+
+            bool operator==(const iterator& other) const
+            {
+                ASSERT(m_values == other.m_values);
+                return m_index == other.m_index;
+            }
+
+            bool operator!=(const iterator& other) const
+            {
+                return !(*this == other);
+            }
+
+        private:
+            Vector<UpsilonValue*, 8>* m_values;
+            unsigned m_index;
+        };
+
+        iterator begin() const { return iterator(m_values); }
+        iterator end() const { return iterator(m_values, m_values->size()); }
+
+    private:
+        Vector<UpsilonValue*, 8>* m_values;
+    };
+    
+    class UpsilonCollection {
+    public:
+        UpsilonCollection()
+        {
+        }
+        
+        UpsilonCollection(PhiChildren* phiChildren, Value* value, Vector<UpsilonValue*, 8>* values)
+            : m_phiChildren(phiChildren)
+            , m_value(value)
+            , m_values(values)
+        {
+        }
+
+        unsigned size() const { return m_values->size(); }
+        Value* at(unsigned index) const { return m_values->at(index); }
+        Value* operator[](unsigned index) const { return at(index); }
+
+        bool contains(Value* value) const { return m_values->contains(value); }
+
+        typedef Vector<UpsilonValue*, 8>::const_iterator iterator;
+        Vector<UpsilonValue*, 8>::const_iterator begin() const { return m_values->begin(); }
+        Vector<UpsilonValue*, 8>::const_iterator end() const { return m_values->end(); }
+
+        ValueCollection values() { return ValueCollection(m_values); }
+        
+        template<typename Functor>
+        void forAllTransitiveIncomingValues(const Functor& functor)
+        {
+            if (m_value->opcode() != Phi) {
+                functor(m_value);
+                return;
+            }
+            
+            GraphNodeWorklist<Value*> worklist;
+            worklist.push(m_value);
+            while (Value* phi = worklist.pop()) {
+                for (Value* child : m_phiChildren->at(phi).values()) {
+                    if (child->opcode() == Phi)
+                        worklist.push(child);
+                    else
+                        functor(child);
+                }
+            }
+        }
+
+        bool transitivelyUses(Value* candidate)
+        {
+            bool result = false;
+            forAllTransitiveIncomingValues(
+                [&] (Value* child) {
+                    result |= child == candidate;
+                });
+            return result;
+        }
+
+    private:
+        PhiChildren* m_phiChildren { nullptr };
+        Value* m_value { nullptr };
+        Vector<UpsilonValue*, 8>* m_values { nullptr };
+    };
+
+    UpsilonCollection at(Value* value) { return UpsilonCollection(this, value, &m_upsilons[value]); }
+    UpsilonCollection operator[](Value* value) { return at(value); }
+
+    const Vector<Value*>& phis() const { return m_phis; }
+
+private:
+    IndexMap<Value, Vector<UpsilonValue*, 8>> m_upsilons;
+    Vector<Value*> m_phis;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Procedure.cpp b/b3/B3Procedure.cpp
new file mode 100644
index 0000000..0cb48c4
--- /dev/null
+++ b/b3/B3Procedure.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Procedure.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BasicBlockUtils.h"
+#include "B3BlockWorklist.h"
+#include "B3CFG.h"
+#include "B3DataSection.h"
+#include "B3Dominators.h"
+#include "B3OpaqueByproducts.h"
+#include "B3PhiChildren.h"
+#include "B3StackSlot.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+
+namespace JSC { namespace B3 {
+
+Procedure::Procedure()
+    : m_cfg(new CFG(*this))
+    , m_lastPhaseName("initial")
+    , m_byproducts(std::make_unique<OpaqueByproducts>())
+    , m_code(new Air::Code(*this))
+{
+}
+
+Procedure::~Procedure()
+{
+}
+
+void Procedure::printOrigin(PrintStream& out, Origin origin) const
+{
+    if (m_originPrinter)
+        m_originPrinter->run(out, origin);
+    else
+        out.print(origin);
+}
+
+BasicBlock* Procedure::addBlock(double frequency)
+{
+    std::unique_ptr<BasicBlock> block(new BasicBlock(m_blocks.size(), frequency));
+    BasicBlock* result = block.get();
+    m_blocks.append(WTFMove(block));
+    return result;
+}
+
+StackSlot* Procedure::addStackSlot(unsigned byteSize)
+{
+    return m_stackSlots.addNew(byteSize);
+}
+
+Variable* Procedure::addVariable(Type type)
+{
+    return m_variables.addNew(type); 
+}
+
+Value* Procedure::clone(Value* value)
+{
+    std::unique_ptr<Value> clone(value->cloneImpl());
+    clone->m_index = UINT_MAX;
+    clone->owner = nullptr;
+    return m_values.add(WTFMove(clone));
+}
+
+Value* Procedure::addIntConstant(Origin origin, Type type, int64_t value)
+{
+    switch (type) {
+    case Int32:
+        return add<Const32Value>(origin, static_cast<int32_t>(value));
+    case Int64:
+        return add<Const64Value>(origin, value);
+    case Double:
+        return add<ConstDoubleValue>(origin, static_cast<double>(value));
+    case Float:
+        return add<ConstFloatValue>(origin, static_cast<float>(value));
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+}
+
+Value* Procedure::addIntConstant(Value* likeValue, int64_t value)
+{
+    return addIntConstant(likeValue->origin(), likeValue->type(), value);
+}
+
+Value* Procedure::addBottom(Origin origin, Type type)
+{
+    return addIntConstant(origin, type, 0);
+}
+
+Value* Procedure::addBottom(Value* value)
+{
+    return addBottom(value->origin(), value->type());
+}
+
+Value* Procedure::addBoolConstant(Origin origin, TriState triState)
+{
+    int32_t value = 0;
+    switch (triState) {
+    case FalseTriState:
+        value = 0;
+        break;
+    case TrueTriState:
+        value = 1;
+        break;
+    case MixedTriState:
+        return nullptr;
+    }
+
+    return addIntConstant(origin, Int32, value);
+}
+
+void Procedure::resetValueOwners()
+{
+    for (BasicBlock* block : *this) {
+        for (Value* value : *block)
+            value->owner = block;
+    }
+}
+
+void Procedure::resetReachability()
+{
+    recomputePredecessors(m_blocks);
+    
+    // The common case is that this does not find any dead blocks.
+    bool foundDead = false;
+    for (auto& block : m_blocks) {
+        if (isBlockDead(block.get())) {
+            foundDead = true;
+            break;
+        }
+    }
+    if (!foundDead)
+        return;
+    
+    resetValueOwners();
+
+    for (Value* value : values()) {
+        if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+            if (isBlockDead(upsilon->phi()->owner))
+                upsilon->replaceWithNop();
+        }
+    }
+    
+    for (auto& block : m_blocks) {
+        if (isBlockDead(block.get())) {
+            for (Value* value : *block)
+                deleteValue(value);
+            block = nullptr;
+        }
+    }
+}
+
+void Procedure::invalidateCFG()
+{
+    m_dominators = nullptr;
+}
+
+void Procedure::dump(PrintStream& out) const
+{
+    IndexSet<Value> valuesInBlocks;
+    for (BasicBlock* block : *this) {
+        out.print(deepDump(*this, block));
+        valuesInBlocks.addAll(*block);
+    }
+    bool didPrint = false;
+    for (Value* value : values()) {
+        if (valuesInBlocks.contains(value))
+            continue;
+
+        if (!didPrint) {
+            dataLog("Orphaned values:\n");
+            didPrint = true;
+        }
+        dataLog("    ", deepDump(*this, value), "\n");
+    }
+    if (variables().size()) {
+        out.print("Variables:\n");
+        for (Variable* variable : variables())
+            out.print("    ", deepDump(variable), "\n");
+    }
+    if (stackSlots().size()) {
+        out.print("Stack slots:\n");
+        for (StackSlot* slot : stackSlots())
+            out.print("    ", pointerDump(slot), ": ", deepDump(slot), "\n");
+    }
+    if (m_byproducts->count())
+        out.print(*m_byproducts);
+}
+
+Vector<BasicBlock*> Procedure::blocksInPreOrder()
+{
+    return B3::blocksInPreOrder(at(0));
+}
+
+Vector<BasicBlock*> Procedure::blocksInPostOrder()
+{
+    return B3::blocksInPostOrder(at(0));
+}
+
+void Procedure::deleteStackSlot(StackSlot* stackSlot)
+{
+    m_stackSlots.remove(stackSlot);
+}
+
+void Procedure::deleteVariable(Variable* variable)
+{
+    m_variables.remove(variable);
+}
+
+void Procedure::deleteValue(Value* value)
+{
+    m_values.remove(value);
+}
+
+void Procedure::deleteOrphans()
+{
+    IndexSet<Value> valuesInBlocks;
+    for (BasicBlock* block : *this)
+        valuesInBlocks.addAll(*block);
+
+    // Since this method is not on any hot path, we do it conservatively: first a pass to
+    // identify the values to be removed, and then a second pass to remove them. This avoids any
+    // risk of the value iteration being broken by removals.
+    Vector<Value*> toRemove;
+    for (Value* value : values()) {
+        if (!valuesInBlocks.contains(value))
+            toRemove.append(value);
+        else if (UpsilonValue* upsilon = value->as<UpsilonValue>()) {
+            if (!valuesInBlocks.contains(upsilon->phi()))
+                upsilon->replaceWithNop();
+        }
+    }
+
+    for (Value* value : toRemove)
+        deleteValue(value);
+}
+
+Dominators& Procedure::dominators()
+{
+    if (!m_dominators)
+        m_dominators = std::make_unique<Dominators>(*this);
+    return *m_dominators;
+}
+
+void Procedure::addFastConstant(const ValueKey& constant)
+{
+    RELEASE_ASSERT(constant.isConstant());
+    m_fastConstants.add(constant);
+}
+
+bool Procedure::isFastConstant(const ValueKey& constant)
+{
+    if (!constant)
+        return false;
+    return m_fastConstants.contains(constant);
+}
+
+CCallHelpers::Label Procedure::entrypointLabel(unsigned index) const
+{
+    return m_code->entrypointLabel(index);
+}
+
+void* Procedure::addDataSection(size_t size)
+{
+    if (!size)
+        return nullptr;
+    std::unique_ptr<DataSection> dataSection = std::make_unique<DataSection>(size);
+    void* result = dataSection->data();
+    m_byproducts->add(WTFMove(dataSection));
+    return result;
+}
+
+unsigned Procedure::callArgAreaSizeInBytes() const
+{
+    return code().callArgAreaSizeInBytes();
+}
+
+void Procedure::requestCallArgAreaSizeInBytes(unsigned size)
+{
+    code().requestCallArgAreaSizeInBytes(size);
+}
+
+void Procedure::pinRegister(Reg reg)
+{
+    code().pinRegister(reg);
+}
+
+unsigned Procedure::frameSize() const
+{
+    return code().frameSize();
+}
+
+const RegisterAtOffsetList& Procedure::calleeSaveRegisters() const
+{
+    return code().calleeSaveRegisters();
+}
+
+Value* Procedure::addValueImpl(Value* value)
+{
+    return m_values.add(std::unique_ptr<Value>(value));
+}
+
+void Procedure::setBlockOrderImpl(Vector<BasicBlock*>& blocks)
+{
+    IndexSet<BasicBlock> blocksSet;
+    blocksSet.addAll(blocks);
+
+    for (BasicBlock* block : *this) {
+        if (!blocksSet.contains(block))
+            blocks.append(block);
+    }
+
+    // Place blocks into this's block list by first leaking all of the blocks and then readopting
+    // them.
+    for (auto& entry : m_blocks)
+        entry.release();
+
+    m_blocks.resize(blocks.size());
+    for (unsigned i = 0; i < blocks.size(); ++i) {
+        BasicBlock* block = blocks[i];
+        block->m_index = i;
+        m_blocks[i] = std::unique_ptr<BasicBlock>(block);
+    }
+}
+
+void Procedure::setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator> generator)
+{
+    code().setWasmBoundsCheckGenerator(generator);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Procedure.h b/b3/B3Procedure.h
new file mode 100644
index 0000000..2236145
--- /dev/null
+++ b/b3/B3Procedure.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3OpaqueByproducts.h"
+#include "B3Origin.h"
+#include "B3PCToOriginMap.h"
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include "B3ValueKey.h"
+#include "CCallHelpers.h"
+#include "PureNaN.h"
+#include "RegisterAtOffsetList.h"
+#include <wtf/Bag.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/HashSet.h>
+#include <wtf/IndexedContainerIterator.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+#include <wtf/SharedTask.h>
+#include <wtf/TriState.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class BlockInsertionSet;
+class CFG;
+class Dominators;
+class StackSlot;
+class Value;
+class Variable;
+
+namespace Air { class Code; }
+
+typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg, unsigned);
+typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+
+// This represents B3's view of a piece of code. Note that this object must exist in a 1:1
+// relationship with Air::Code. B3::Procedure and Air::Code are just different facades of the B3
+// compiler's knowledge about a piece of code. Some kinds of state aren't perfect fits for either
+// Procedure or Code, and are placed in one or the other based on convenience. Procedure always
+// allocates a Code, and a Code cannot be allocated without an owning Procedure and they always
+// have references to each other.
+
+class Procedure {
+    WTF_MAKE_NONCOPYABLE(Procedure);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+
+    JS_EXPORT_PRIVATE Procedure();
+    JS_EXPORT_PRIVATE ~Procedure();
+
+    template<typename Callback>
+    void setOriginPrinter(Callback&& callback)
+    {
+        m_originPrinter = createSharedTask<void(PrintStream&, Origin)>(
+            std::forward<Callback>(callback));
+    }
+
+    // Usually you use this via OriginDump, though it's cool to use it directly.
+    void printOrigin(PrintStream& out, Origin origin) const;
+
+    // This is a debugging hack. Sometimes while debugging B3 you need to break the abstraction
+    // and get at the DFG Graph, or whatever data structure the frontend used to describe the
+    // program. The FTL passes the DFG Graph.
+    void setFrontendData(const void* value) { m_frontendData = value; }
+    const void* frontendData() const { return m_frontendData; }
+
+    JS_EXPORT_PRIVATE BasicBlock* addBlock(double frequency = 1);
+
+    // Changes the order of basic blocks to be as in the supplied vector. The vector does not
+    // need to mention every block in the procedure. Blocks not mentioned will be placed after
+    // these blocks in the same order as they were in originally.
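+    // For example (illustrative, not from the original change): if the current order is
+    // [A, B, C, D] and you pass { C, A }, the new order becomes [C, A, B, D]; the unmentioned
+    // blocks B and D keep their relative order and are appended after the listed ones.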
+    template<typename BlockIterable>
+    void setBlockOrder(const BlockIterable& iterable)
+    {
+        Vector<BasicBlock*> blocks;
+        for (BasicBlock* block : iterable)
+            blocks.append(block);
+        setBlockOrderImpl(blocks);
+    }
+
+    JS_EXPORT_PRIVATE StackSlot* addStackSlot(unsigned byteSize);
+    JS_EXPORT_PRIVATE Variable* addVariable(Type);
+    
+    template<typename ValueType, typename... Arguments>
+    ValueType* add(Arguments...);
+
+    Value* clone(Value*);
+
+    Value* addIntConstant(Origin, Type, int64_t value);
+    Value* addIntConstant(Value*, int64_t value);
+
+    Value* addBottom(Origin, Type);
+    Value* addBottom(Value*);
+
+    // Returns null for MixedTriState.
+    Value* addBoolConstant(Origin, TriState);
+
+    void resetValueOwners();
+    JS_EXPORT_PRIVATE void resetReachability();
+
+    // This destroys CFG analyses. If we ask for them again, we will recompute them. Usually you
+    // should call this anytime you call resetReachability().
+    void invalidateCFG();
+
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    unsigned size() const { return m_blocks.size(); }
+    BasicBlock* at(unsigned index) const { return m_blocks[index].get(); }
+    BasicBlock* operator[](unsigned index) const { return at(index); }
+
+    typedef WTF::IndexedContainerIterator<Procedure> iterator;
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+
+    Vector<BasicBlock*> blocksInPreOrder();
+    Vector<BasicBlock*> blocksInPostOrder();
+
+    SparseCollection<StackSlot>& stackSlots() { return m_stackSlots; }
+    const SparseCollection<StackSlot>& stackSlots() const { return m_stackSlots; }
+
+    // Short for stackSlots().remove(). It's better to call this method since it's out of line.
+    void deleteStackSlot(StackSlot*);
+
+    SparseCollection<Variable>& variables() { return m_variables; }
+    const SparseCollection<Variable>& variables() const { return m_variables; }
+
+    // Short for variables().remove(). It's better to call this method since it's out of line.
+    void deleteVariable(Variable*);
+
+    SparseCollection<Value>& values() { return m_values; }
+    const SparseCollection<Value>& values() const { return m_values; }
+
+    // Short for values().remove(). It's better to call this method since it's out of line.
+    void deleteValue(Value*);
+
+    // A valid procedure cannot contain any orphan values. An orphan is a value that is not in
+    // any basic block. It is possible to create an orphan value during code generation or during
+    // transformation. If you know that you may have created some, you can call this method to
+    // delete them, making the procedure valid again.
+    void deleteOrphans();
+
+    CFG& cfg() const { return *m_cfg; }
+
+    Dominators& dominators();
+
+    void addFastConstant(const ValueKey&);
+    bool isFastConstant(const ValueKey&);
+    
+    unsigned numEntrypoints() const { return m_numEntrypoints; }
+    void setNumEntrypoints(unsigned numEntrypoints) { m_numEntrypoints = numEntrypoints; }
+    
+    // Only call this after code generation is complete. Note that the label for the 0th entrypoint
+    // should point to exactly where the code generation cursor was before you started generating
+    // code.
+    JS_EXPORT_PRIVATE CCallHelpers::Label entrypointLabel(unsigned entrypointIndex) const;
+
+    // The name has to be a string literal, since we don't do any memory management for the string.
+    void setLastPhaseName(const char* name)
+    {
+        m_lastPhaseName = name;
+    }
+
+    const char* lastPhaseName() const { return m_lastPhaseName; }
+
+    // Allocates a slab of memory that will be kept alive by anyone who keeps the resulting code
+    // alive. Great for compiler-generated data sections, like switch jump tables and constant pools.
+    // This returns memory that has been zero-initialized.
+    JS_EXPORT_PRIVATE void* addDataSection(size_t);
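+    // A possible use (illustrative only; 'proc' and 'numCases' are assumed names): allocate a
+    // jump table that stays alive with the generated code:
+    //
+    //     int64_t* table = static_cast<int64_t*>(proc.addDataSection(numCases * sizeof(int64_t)));
+    //     // entries start zeroed; fill table[i] once the case labels are known.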
+
+    OpaqueByproducts& byproducts() { return *m_byproducts; }
+
+    // Below are methods that make sense to call after you have generated code for the procedure.
+
+    // You have to call this method after calling generate(). The code generated by B3::generate()
+    // will require you to keep this object alive for as long as that code is runnable. Usually, this
+    // just keeps alive things like the double constant pool and switch lookup tables. If this sounds
+    // confusing, you should probably be using the B3::Compilation API to compile code. If you use
+    // that API, then you don't have to worry about this.
+    std::unique_ptr<OpaqueByproducts> releaseByproducts() { return WTFMove(m_byproducts); }
+
+    // This gives you direct access to Code. However, the idea is that clients of B3 shouldn't have to
+    // call this. So, Procedure has some methods (below) that expose some Air::Code functionality.
+    const Air::Code& code() const { return *m_code; }
+    Air::Code& code() { return *m_code; }
+
+    unsigned callArgAreaSizeInBytes() const;
+    void requestCallArgAreaSizeInBytes(unsigned size);
+
+    // This tells the register allocators to stay away from this register.
+    JS_EXPORT_PRIVATE void pinRegister(Reg);
+
+    JS_EXPORT_PRIVATE unsigned frameSize() const;
+    JS_EXPORT_PRIVATE const RegisterAtOffsetList& calleeSaveRegisters() const;
+
+    PCToOriginMap& pcToOriginMap() { return m_pcToOriginMap; }
+    PCToOriginMap releasePCToOriginMap() { return WTFMove(m_pcToOriginMap); }
+
+    JS_EXPORT_PRIVATE void setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator>);
+
+    template<typename Functor>
+    void setWasmBoundsCheckGenerator(const Functor& functor)
+    {
+        setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator>(createSharedTask<WasmBoundsCheckGeneratorFunction>(functor)));
+    }
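+    // Illustrative usage sketch (not from the original change); the callable must match
+    // WasmBoundsCheckGeneratorFunction, i.e. void(CCallHelpers&, GPRReg, unsigned):
+    //
+    //     proc.setWasmBoundsCheckGenerator([] (CCallHelpers& jit, GPRReg pinnedGPR, unsigned) {
+    //         jit.breakpoint(); // e.g. trap when a wasm bounds check fails
+    //     });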
+
+private:
+    friend class BlockInsertionSet;
+
+    JS_EXPORT_PRIVATE Value* addValueImpl(Value*);
+    void setBlockOrderImpl(Vector<BasicBlock*>&);
+
+    SparseCollection<StackSlot> m_stackSlots;
+    SparseCollection<Variable> m_variables;
+    Vector<std::unique_ptr<BasicBlock>> m_blocks;
+    SparseCollection<Value> m_values;
+    std::unique_ptr<CFG> m_cfg;
+    std::unique_ptr<Dominators> m_dominators;
+    HashSet<ValueKey> m_fastConstants;
+    unsigned m_numEntrypoints { 1 };
+    const char* m_lastPhaseName;
+    std::unique_ptr<OpaqueByproducts> m_byproducts;
+    std::unique_ptr<Air::Code> m_code;
+    RefPtr<SharedTask<void(PrintStream&, Origin)>> m_originPrinter;
+    const void* m_frontendData;
+    PCToOriginMap m_pcToOriginMap;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ProcedureInlines.h b/b3/B3ProcedureInlines.h
new file mode 100644
index 0000000..990ba31
--- /dev/null
+++ b/b3/B3ProcedureInlines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
+template<typename ValueType, typename... Arguments>
+ValueType* Procedure::add(Arguments... arguments)
+{
+    return static_cast<ValueType*>(addValueImpl(new ValueType(arguments...)));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3PureCSE.cpp b/b3/B3PureCSE.cpp
new file mode 100644
index 0000000..4030e28
--- /dev/null
+++ b/b3/B3PureCSE.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3PureCSE.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Dominators.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+PureCSE::PureCSE()
+{
+}
+
+PureCSE::~PureCSE()
+{
+}
+
+void PureCSE::clear()
+{
+    m_map.clear();
+}
+
+Value* PureCSE::findMatch(const ValueKey& key, BasicBlock* block, Dominators& dominators)
+{
+    if (!key)
+        return nullptr;
+
+    auto iter = m_map.find(key);
+    if (iter == m_map.end())
+        return nullptr;
+
+    for (Value* match : iter->value) {
+        if (dominators.dominates(match->owner, block))
+            return match;
+    }
+
+    return nullptr;
+}
+
+bool PureCSE::process(Value* value, Dominators& dominators)
+{
+    if (value->opcode() == Identity)
+        return false;
+
+    ValueKey key = value->key();
+    if (!key)
+        return false;
+
+    Matches& matches = m_map.add(key, Matches()).iterator->value;
+
+    for (Value* match : matches) {
+        if (dominators.dominates(match->owner, value->owner)) {
+            value->replaceWithIdentity(match);
+            return true;
+        }
+    }
+
+    matches.append(value);
+    return false;
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3PureCSE.h b/b3/B3PureCSE.h
new file mode 100644
index 0000000..942966c
--- /dev/null
+++ b/b3/B3PureCSE.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ValueKey.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class Dominators;
+class Value;
+
+typedef Vector<Value*, 1> Matches;
+
+// This is a reusable utility for doing pure CSE. You can use it to do pure CSE on a program by just
+// proceeding in order and calling process().
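+//
+// A minimal driver sketch (illustrative, not part of this patch; 'proc' is an assumed Procedure).
+// Visiting blocks in pre-order guarantees a dominating block is seen before the blocks it dominates:
+//
+//     PureCSE cse;
+//     Dominators& dominators = proc.dominators();
+//     for (BasicBlock* block : proc.blocksInPreOrder()) {
+//         for (Value* value : *block)
+//             cse.process(value, dominators);
+//     }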
+class PureCSE {
+public:
+    PureCSE();
+    ~PureCSE();
+
+    void clear();
+
+    Value* findMatch(const ValueKey&, BasicBlock*, Dominators&);
+
+    bool process(Value*, Dominators&);
+    
+private:
+    HashMap<ValueKey, Matches> m_map;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ReduceDoubleToFloat.cpp b/b3/B3ReduceDoubleToFloat.cpp
new file mode 100644
index 0000000..ef92811
--- /dev/null
+++ b/b3/B3ReduceDoubleToFloat.cpp
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ReduceDoubleToFloat.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include "B3InsertionSetInlines.h"
+#include "B3PhaseScope.h"
+#include "B3UseCounts.h"
+#include "B3ValueInlines.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+bool verbose = false;
+bool printRemainingConversions = false;
+
+class DoubleToFloatReduction {
+public:
+    DoubleToFloatReduction(Procedure& procedure)
+        : m_procedure(procedure)
+    {
+    }
+
+    void run()
+    {
+        if (!findCandidates())
+            return;
+
+        findPhisContainingFloat();
+
+        simplify();
+
+        cleanUp();
+    }
+
+private:
+    // This step finds values that are used as Double and cannot be converted to Float.
+    // It flows the information backward through Phi-Upsilons.
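+    // For instance (illustrative): if a Phi feeds an Add, the Phi is "used as Double", so the
+    // child of every Upsilon writing to that Phi must stay Double as well.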
+    bool findCandidates()
+    {
+        bool foundConversionCandidate = false;
+        Vector<Value*> upsilons;
+
+        // First, we find all values that are strictly used as double.
+        // Those are values used by something else than DoubleToFloat.
+        //
+        // We don't know the state of Upsilons until their Phi has been
+        // set. We just keep a list of them and update them next.
+        for (BasicBlock* block : m_procedure) {
+            for (Value* value : *block) {
+                value->performSubstitution();
+
+                if (value->opcode() == DoubleToFloat) {
+                    foundConversionCandidate = true;
+
+                    Value* child = value->child(0);
+                    if (child->opcode() == FloatToDouble) {
+                        // We don't really need to simplify this early but it simplifies debugging.
+                        value->replaceWithIdentity(child->child(0));
+                    }
+                    continue;
+                }
+
+                if (value->opcode() == FloatToDouble)
+                    foundConversionCandidate = true;
+
+                if (value->opcode() == Upsilon) {
+                    Value* child = value->child(0);
+                    if (child->type() == Double)
+                        upsilons.append(value);
+                    continue;
+                }
+
+                for (Value* child : value->children()) {
+                    if (child->type() == Double)
+                        m_valuesUsedAsDouble.add(child);
+                }
+            }
+        }
+
+        if (!foundConversionCandidate)
+            return false;
+
+        // Now we just need to propagate through Phi-Upsilon.
+        // An Upsilon can convert its input to float if its phi is never used as double.
+        // If we modify a phi, we need to continue until all the Upsilon-Phi converge.
+        bool changedPhiState;
+        do {
+            changedPhiState = false;
+            for (Value* value : upsilons) {
+                UpsilonValue* upsilon = value->as<UpsilonValue>();
+                Value* phi = upsilon->phi();
+                if (!m_valuesUsedAsDouble.contains(phi))
+                    continue;
+
+                Value* child = value->child(0);
+                bool childChanged = m_valuesUsedAsDouble.add(child);
+                if (childChanged && child->opcode() == Phi)
+                    changedPhiState = true;
+            }
+        } while (changedPhiState);
+
+        if (verbose) {
+            dataLog("Conversion candidates:\n");
+            for (BasicBlock* block : m_procedure) {
+                for (Value* value : *block) {
+                    if (value->type() == Double && !m_valuesUsedAsDouble.contains(value))
+                        dataLog("    ", deepDump(m_procedure, value), "\n");
+                }
+            }
+            dataLog("\n");
+        }
+
+        return true;
+    }
+
+    // This step finds Phis of type Double that effectively contains Float values.
+    // It flows that information forward through Phi-Upsilons.
+    void findPhisContainingFloat()
+    {
+        Vector<Value*> upsilons;
+
+        // The Double values that can be safely turned into a Float are:
+        // - FloatToDouble
+        // - ConstDouble with a value that converts to Float without losing precision.
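+        // For example (illustrative): ConstDouble(0.5) and ConstDouble(1.0) round-trip through
+        // float exactly, whereas ConstDouble(0.1) does not, so the latter must stay Double.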
+        for (BasicBlock* block : m_procedure) {
+            for (Value* value : *block) {
+                if (value->opcode() != Upsilon)
+                    continue;
+
+                Value* child = value->child(0);
+                if (child->type() != Double
+                    || child->opcode() == FloatToDouble)
+                    continue;
+
+                if (child->hasDouble()) {
+                    double constValue = child->asDouble();
+                    if (isIdentical(static_cast<double>(static_cast<float>(constValue)), constValue))
+                        continue;
+                }
+
+                if (child->opcode() == Phi) {
+                    upsilons.append(value);
+                    continue;
+                }
+
+                UpsilonValue* upsilon = value->as<UpsilonValue>();
+                Value* phi = upsilon->phi();
+                m_phisContainingDouble.add(phi);
+            }
+        }
+
+        // Propagate the flags forward.
+        bool changedPhiState;
+        do {
+            changedPhiState = false;
+            for (Value* value : upsilons) {
+                Value* child = value->child(0);
+                if (m_phisContainingDouble.contains(child)) {
+                    UpsilonValue* upsilon = value->as<UpsilonValue>();
+                    Value* phi = upsilon->phi();
+                    changedPhiState |= m_phisContainingDouble.add(phi);
+                }
+            }
+        } while (changedPhiState);
+
+        if (verbose) {
+            dataLog("Phis containing float values:\n");
+            for (BasicBlock* block : m_procedure) {
+                for (Value* value : *block) {
+                    if (value->opcode() == Phi
+                        && value->type() == Double
+                        && !m_phisContainingDouble.contains(value))
+                        dataLog("    ", deepDump(m_procedure, value), "\n");
+                }
+            }
+            dataLog("\n");
+        }
+    }
+
+    bool canBeTransformedToFloat(Value* value)
+    {
+        if (value->opcode() == FloatToDouble)
+            return true;
+
+        if (value->hasDouble())
+            return true; // Double constant truncated to float.
+
+        if (value->opcode() == Phi) {
+            return value->type() == Float
+                || (value->type() == Double && !m_phisContainingDouble.contains(value));
+        }
+        return false;
+    }
+
+    Value* transformToFloat(Value* value, unsigned valueIndex, InsertionSet& insertionSet)
+    {
+        ASSERT(canBeTransformedToFloat(value));
+        if (value->opcode() == FloatToDouble)
+            return value->child(0);
+
+        if (value->hasDouble())
+            return insertionSet.insert<ConstFloatValue>(valueIndex, value->origin(), static_cast<float>(value->asDouble()));
+
+        if (value->opcode() == Phi) {
+            ASSERT(value->type() == Double || value->type() == Float);
+            if (value->type() == Double)
+                convertPhi(value);
+            return value;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+
+    void convertPhi(Value* phi)
+    {
+        ASSERT(phi->opcode() == Phi);
+        ASSERT(phi->type() == Double);
+        phi->setType(Float);
+        m_convertedPhis.add(phi);
+    }
+
+    bool attemptTwoOperandsSimplify(Value* candidate, unsigned candidateIndex, InsertionSet& insertionSet)
+    {
+        Value* left = candidate->child(0);
+        Value* right = candidate->child(1);
+        if (!canBeTransformedToFloat(left) || !canBeTransformedToFloat(right))
+            return false;
+
+        m_convertedValue.add(candidate);
+        candidate->child(0) = transformToFloat(left, candidateIndex, insertionSet);
+        candidate->child(1) = transformToFloat(right, candidateIndex, insertionSet);
+        return true;
+    }
+
+    // Simplify Double operations into Float operations.
+    void simplify()
+    {
+        Vector<Value*> upsilonReferencingDoublePhi;
+
+        InsertionSet insertionSet(m_procedure);
+        for (BasicBlock* block : m_procedure) {
+            for (unsigned index = 0; index < block->size(); ++index) {
+                Value* value = block->at(index);
+
+                switch (value->opcode()) {
+                case Equal:
+                case NotEqual:
+                case LessThan:
+                case GreaterThan:
+                case LessEqual:
+                case GreaterEqual:
+                case EqualOrUnordered:
+                    attemptTwoOperandsSimplify(value, index, insertionSet);
+                    continue;
+                case Upsilon: {
+                    Value* child = value->child(0);
+                    if (child->opcode() == Phi && child->type() == Double)
+                        upsilonReferencingDoublePhi.append(value);
+                    continue;
+                }
+                default:
+                    break;
+                }
+
+                if (m_valuesUsedAsDouble.contains(value))
+                    continue;
+
+                switch (value->opcode()) {
+                case Add:
+                case Sub:
+                case Mul:
+                case Div:
+                    if (attemptTwoOperandsSimplify(value, index, insertionSet))
+                        value->setType(Float);
+                    break;
+                case Abs:
+                case Ceil:
+                case Floor:
+                case Neg:
+                case Sqrt: {
+                    Value* child = value->child(0);
+                    if (canBeTransformedToFloat(child)) {
+                        value->child(0) = transformToFloat(child, index, insertionSet);
+                        value->setType(Float);
+                        m_convertedValue.add(value);
+                    }
+                    break;
+                }
+                case IToD: {
+                    Value* iToF = insertionSet.insert<Value>(index, IToF, value->origin(), value->child(0));
+                    value->setType(Float);
+                    value->replaceWithIdentity(iToF);
+                    m_convertedValue.add(value);
+                    break;
+                }
+                case FloatToDouble:
+                    // This happens if we round twice.
+                    // Typically, this is indirect through Phi-Upsilons.
+                    // The Upsilon rounds and the Phi rounds.
+                    value->setType(Float);
+                    value->replaceWithIdentity(value->child(0));
+                    m_convertedValue.add(value);
+                    break;
+                case Phi:
+                    // If a Phi is always converted to Float, we always make it into a float Phi-Upsilon.
+                    // This is a simplistic view of things. Ideally we should keep the type that will minimize
+                    // the amount of conversion in the loop.
+                    if (value->type() == Double)
+                        convertPhi(value);
+                    break;
+                default:
+                    break;
+                }
+            }
+            insertionSet.execute(block);
+        }
+
+        if (!upsilonReferencingDoublePhi.isEmpty()) {
+            // If a Phi contains Float values typed as Double, but is not used as Float
+            // by a non-trivial operation, we did not convert it.
+            //
+            // We fix that now by converting the remaining Phis that contain
+            // float values but were not converted to Float.
+            bool changedPhi;
+            do {
+                changedPhi = false;
+
+                for (Value* value : upsilonReferencingDoublePhi) {
+                    UpsilonValue* upsilon = value->as<UpsilonValue>();
+                    Value* child = value->child(0);
+                    Value* phi = upsilon->phi();
+                    if (phi->type() == Float && child->type() == Double
+                        && !m_phisContainingDouble.contains(child)) {
+                        convertPhi(child);
+                        changedPhi = true;
+                    }
+                }
+
+            } while (changedPhi);
+        }
+    }
+
+    // We are in an inconsistent state where we have
+    // DoubleToFloat nodes over values producing float and Phis that are
+    // float for Upsilons that are Double.
+    //
+    // This step puts us back in a consistent state.
+    void cleanUp()
+    {
+        InsertionSet insertionSet(m_procedure);
+
+        for (BasicBlock* block : m_procedure) {
+            for (unsigned index = 0; index < block->size(); ++index) {
+                Value* value = block->at(index);
+                if (value->opcode() == DoubleToFloat && value->child(0)->type() == Float) {
+                    value->replaceWithIdentity(value->child(0));
+                    continue;
+                }
+
+                if (value->opcode() == Upsilon) {
+                    UpsilonValue* upsilon = value->as<UpsilonValue>();
+                    Value* child = value->child(0);
+                    Value* phi = upsilon->phi();
+
+                    if (phi->type() == Float) {
+                        if (child->type() == Double) {
+                            Value* newChild = nullptr;
+                            if (child->opcode() == FloatToDouble)
+                                newChild = child->child(0);
+                            else if (child->hasDouble())
+                                newChild = insertionSet.insert<ConstFloatValue>(index, child->origin(), static_cast<float>(child->asDouble()));
+                            else
+                                newChild = insertionSet.insert<Value>(index, DoubleToFloat, upsilon->origin(), child);
+                            upsilon->child(0) = newChild;
+                        }
+                        continue;
+                    }
+                }
+
+                if (!m_convertedValue.contains(value)) {
+                    // Phis can be converted from Double to Float if the value they contain
+                    // is not more precise than a Float.
+                    // If the value is needed as Double, it has to be converted back.
+                    for (Value*& child : value->children()) {
+                        if (m_convertedPhis.contains(child))
+                            child = insertionSet.insert<Value>(index, FloatToDouble, value->origin(), child);
+                    }
+                }
+            }
+            insertionSet.execute(block);
+        }
+    }
+
+    Procedure& m_procedure;
+
+    // Set of all the Double values that are actually used as Double.
+    // Converting any of them to Float would lose precision.
+    IndexSet<Value> m_valuesUsedAsDouble;
+
+    // Set of all the Phi of type Double that really contains a Double.
+    // Any Double Phi not in the set can be converted to Float without losing precision.
+    IndexSet<Value> m_phisContainingDouble;
+
+    // Any value that was converted from producing a Double to producing a Float.
+    // This set does not include Phi-Upsilons.
+    IndexSet<Value> m_convertedValue;
+
+    // Any value that previously produced Double and now produce Float.
+    IndexSet<Value> m_convertedPhis;
+};
+
+void printGraphIfConverting(Procedure& procedure)
+{
+    if (!printRemainingConversions)
+        return;
+
+    UseCounts useCount(procedure);
+
+    Vector<Value*> doubleToFloat;
+    Vector<Value*> floatToDouble;
+
+    for (BasicBlock* block : procedure) {
+        for (Value* value : *block) {
+            if (!useCount.numUses(value))
+                continue;
+
+            if (value->opcode() == DoubleToFloat)
+                doubleToFloat.append(value);
+            if (value->opcode() == FloatToDouble)
+                floatToDouble.append(value);
+        }
+    }
+
+    if (doubleToFloat.isEmpty() && floatToDouble.isEmpty())
+        return;
+
+    dataLog("Procedure with Float-Double conversion:\n", procedure, "\n");
+    dataLog("Converting nodes:\n");
+    for (Value* value : doubleToFloat)
+        dataLog("    ", deepDump(procedure, value), "\n");
+    for (Value* value : floatToDouble)
+        dataLog("    ", deepDump(procedure, value), "\n");
+
+}
+
+} // anonymous namespace.
+
+void reduceDoubleToFloat(Procedure& procedure)
+{
+    PhaseScope phaseScope(procedure, "reduceDoubleToFloat");
+
+    if (verbose)
+        dataLog("Before DoubleToFloatReduction:\n", procedure, "\n");
+
+    DoubleToFloatReduction doubleToFloatReduction(procedure);
+    doubleToFloatReduction.run();
+
+    if (verbose)
+        dataLog("After DoubleToFloatReduction:\n", procedure, "\n");
+
+    printGraphIfConverting(procedure);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3ReduceDoubleToFloat.h b/b3/B3ReduceDoubleToFloat.h
new file mode 100644
index 0000000..899f770
--- /dev/null
+++ b/b3/B3ReduceDoubleToFloat.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Change Double operations to Float operations when the difference is not observable
+// and doing so is likely beneficial.
+void reduceDoubleToFloat(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ReduceStrength.cpp b/b3/B3ReduceStrength.cpp
new file mode 100644
index 0000000..43c7302
--- /dev/null
+++ b/b3/B3ReduceStrength.cpp
@@ -0,0 +1,2518 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ReduceStrength.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include "B3BlockInsertionSet.h"
+#include "B3ComputeDivisionMagic.h"
+#include "B3Dominators.h"
+#include "B3InsertionSetInlines.h"
+#include "B3MemoryValue.h"
+#include "B3PhaseScope.h"
+#include "B3PhiChildren.h"
+#include "B3ProcedureInlines.h"
+#include "B3PureCSE.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueKeyInlines.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/HashMap.h>
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+// The goal of this phase is to:
+//
+// - Replace operations with less expensive variants. This includes constant folding and classic
+//   strength reductions like turning Mul(x, 1 << k) into Shl(x, k).
+//
+// - Reassociate constant operations. For example, Load(Add(x, c)) is turned into Load(x, offset = c)
+//   and Add(Add(x, c), d) is turned into Add(x, c + d).
+//
+// - Canonicalize operations. There are some cases where it's not at all obvious which kind of
+//   operation is less expensive, but it's useful for subsequent phases - particularly LowerToAir -
+//   to have only one way of representing things.
+//
+// This phase runs to fixpoint. Therefore, the canonicalizations must be designed to be monotonic.
+// For example, if we had a canonicalization that said that Add(x, -c) should be Sub(x, c) and
+// another canonicalization that said that Sub(x, d) should be Add(x, -d), then this phase would end
+// up running forever. We don't want that.
+//
+// Therefore, we need to prioritize certain canonical forms over others. Naively, we want strength
+// reduction to reduce the number of values, and so a form involving fewer total values is more
+// canonical. But we might break this, for example when reducing strength of Mul(x, 9). This could be
+// better written as Add(Shl(x, 3), x), which also happens to be representable using a single
+// instruction on x86.
+//
+// Here are some of the rules we have:
+//
+// Canonical form of logical not: BitXor(value, 1). We may have to avoid using this form if we don't
+// know for sure that 'value' is 0-or-1 (i.e. returnsBool). In that case we fall back on
+// Equal(value, 0).
+//
+// Canonical form of commutative operations: if the operation involves a constant, the constant must
+// come second. Add(x, constant) is canonical, while Add(constant, x) is not. If there are no
+// constants then the canonical form involves the lower-indexed value first. Given Add(x, y), it's
+// canonical if x->index() <= y->index().
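+//
+// As an illustration of how these rules compose (example added for exposition, not new policy):
+// Load(Add(Add(x, 8), 16)) first reassociates to Load(Add(x, 24)) and then folds the constant
+// into the load, yielding Load(x, offset = 24).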
+
+bool verbose = false;
+
+// FIXME: This IntRange stuff should be refactored into a general constant propagator. It's weird
+// that it's just sitting here in this file.
+class IntRange {
+public:
+    IntRange()
+    {
+    }
+
+    IntRange(int64_t min, int64_t max)
+        : m_min(min)
+        , m_max(max)
+    {
+    }
+
+    template<typename T>
+    static IntRange top()
+    {
+        return IntRange(std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
+    }
+
+    static IntRange top(Type type)
+    {
+        switch (type) {
+        case Int32:
+            return top<int32_t>();
+        case Int64:
+            return top<int64_t>();
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    static IntRange rangeForMask(T mask)
+    {
+        if (!(mask + 1))
+            return top<T>();
+        return IntRange(0, mask);
+    }
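+    // For example (illustrative): rangeForMask<int32_t>(0xff) is [0, 255], while a mask of -1
+    // (all bits set) yields the full range of the type.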
+
+    static IntRange rangeForMask(int64_t mask, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return rangeForMask<int32_t>(static_cast<int32_t>(mask));
+        case Int64:
+            return rangeForMask<int64_t>(mask);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    static IntRange rangeForZShr(int32_t shiftAmount)
+    {
+        typename std::make_unsigned<T>::type mask = 0;
+        mask--;
+        mask >>= shiftAmount;
+        return rangeForMask<T>(static_cast<T>(mask));
+    }
+
+    static IntRange rangeForZShr(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return rangeForZShr<int32_t>(shiftAmount);
+        case Int64:
+            return rangeForZShr<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    int64_t min() const { return m_min; }
+    int64_t max() const { return m_max; }
+
+    void dump(PrintStream& out) const
+    {
+        out.print("[", m_min, ",", m_max, "]");
+    }
+
+    template<typename T>
+    bool couldOverflowAdd(const IntRange& other)
+    {
+        return sumOverflows<T>(m_min, other.m_min)
+            || sumOverflows<T>(m_min, other.m_max)
+            || sumOverflows<T>(m_max, other.m_min)
+            || sumOverflows<T>(m_max, other.m_max);
+    }
+
+    bool couldOverflowAdd(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return couldOverflowAdd<int32_t>(other);
+        case Int64:
+            return couldOverflowAdd<int64_t>(other);
+        default:
+            return true;
+        }
+    }
+
+    template<typename T>
+    bool couldOverflowSub(const IntRange& other)
+    {
+        return differenceOverflows<T>(m_min, other.m_min)
+            || differenceOverflows<T>(m_min, other.m_max)
+            || differenceOverflows<T>(m_max, other.m_min)
+            || differenceOverflows<T>(m_max, other.m_max);
+    }
+
+    bool couldOverflowSub(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return couldOverflowSub<int32_t>(other);
+        case Int64:
+            return couldOverflowSub<int64_t>(other);
+        default:
+            return true;
+        }
+    }
+
+    template<typename T>
+    bool couldOverflowMul(const IntRange& other)
+    {
+        return productOverflows<T>(m_min, other.m_min)
+            || productOverflows<T>(m_min, other.m_max)
+            || productOverflows<T>(m_max, other.m_min)
+            || productOverflows<T>(m_max, other.m_max);
+    }
+
+    bool couldOverflowMul(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return couldOverflowMul<int32_t>(other);
+        case Int64:
+            return couldOverflowMul<int64_t>(other);
+        default:
+            return true;
+        }
+    }
+
+    template<typename T>
+    IntRange shl(int32_t shiftAmount)
+    {
+        T newMin = static_cast<T>(m_min) << static_cast<T>(shiftAmount);
+        T newMax = static_cast<T>(m_max) << static_cast<T>(shiftAmount);
+
+        if ((newMin >> shiftAmount) != static_cast<T>(m_min))
+            newMin = std::numeric_limits<T>::min();
+        if ((newMax >> shiftAmount) != static_cast<T>(m_max))
+            newMax = std::numeric_limits<T>::max();
+
+        return IntRange(newMin, newMax);
+    }
+
+    IntRange shl(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return shl<int32_t>(shiftAmount);
+        case Int64:
+            return shl<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange sShr(int32_t shiftAmount)
+    {
+        T newMin = static_cast<T>(m_min) >> static_cast<T>(shiftAmount);
+        T newMax = static_cast<T>(m_max) >> static_cast<T>(shiftAmount);
+
+        return IntRange(newMin, newMax);
+    }
+
+    IntRange sShr(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return sShr<int32_t>(shiftAmount);
+        case Int64:
+            return sShr<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange zShr(int32_t shiftAmount)
+    {
+        // This is an awkward corner case for all of the other logic.
+        if (!shiftAmount)
+            return *this;
+
+        // If the input range may be negative, then all we can say about the output range is that it
+        // will be masked. That's because -1 right shifted just produces that mask.
+        if (m_min < 0)
+            return rangeForZShr<T>(shiftAmount);
+
+        // If the input range is non-negative, then this just brings the range closer to zero.
+        typedef typename std::make_unsigned<T>::type UnsignedT;
+        UnsignedT newMin = static_cast<UnsignedT>(m_min) >> static_cast<UnsignedT>(shiftAmount);
+        UnsignedT newMax = static_cast<UnsignedT>(m_max) >> static_cast<UnsignedT>(shiftAmount);
+        
+        return IntRange(newMin, newMax);
+    }
+
+    IntRange zShr(int32_t shiftAmount, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return zShr<int32_t>(shiftAmount);
+        case Int64:
+            return zShr<int64_t>(shiftAmount);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange add(const IntRange& other)
+    {
+        if (couldOverflowAdd<T>(other))
+            return top<T>();
+        return IntRange(m_min + other.m_min, m_max + other.m_max);
+    }
+
+    IntRange add(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return add<int32_t>(other);
+        case Int64:
+            return add<int64_t>(other);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange sub(const IntRange& other)
+    {
+        if (couldOverflowSub<T>(other))
+            return top<T>();
+        return IntRange(m_min - other.m_max, m_max - other.m_min);
+    }
+
+    IntRange sub(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return sub<int32_t>(other);
+        case Int64:
+            return sub<int64_t>(other);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+    template<typename T>
+    IntRange mul(const IntRange& other)
+    {
+        if (couldOverflowMul<T>(other))
+            return top<T>();
+        return IntRange(
+            std::min(
+                std::min(m_min * other.m_min, m_min * other.m_max),
+                std::min(m_max * other.m_min, m_max * other.m_max)),
+            std::max(
+                std::max(m_min * other.m_min, m_min * other.m_max),
+                std::max(m_max * other.m_min, m_max * other.m_max)));
+    }
+
+    IntRange mul(const IntRange& other, Type type)
+    {
+        switch (type) {
+        case Int32:
+            return mul<int32_t>(other);
+        case Int64:
+            return mul<int64_t>(other);
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return IntRange();
+        }
+    }
+
+private:
+    int64_t m_min { 0 };
+    int64_t m_max { 0 };
+};
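+
+// A small worked example of IntRange (illustrative only; the real callers are further down in this
+// file): masking narrows the range and unsigned shifting narrows it further, which is the kind of
+// fact later reductions can exploit.
+//
+//     IntRange r = IntRange::top<int32_t>();        // [-2147483648, 2147483647]
+//     r = IntRange::rangeForMask<int32_t>(0xff);    // [0, 255]
+//     r = r.zShr<int32_t>(4);                       // [0, 15]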
+
+class ReduceStrength {
+public:
+    ReduceStrength(Procedure& proc)
+        : m_proc(proc)
+        , m_insertionSet(proc)
+        , m_blockInsertionSet(proc)
+    {
+    }
+
+    bool run()
+    {
+        bool result = false;
+        bool first = true;
+        unsigned index = 0;
+        do {
+            m_changed = false;
+            m_changedCFG = false;
+            ++index;
+
+            if (first)
+                first = false;
+            else if (verbose) {
+                dataLog("B3 after iteration #", index - 1, " of reduceStrength:\n");
+                dataLog(m_proc);
+            }
+            
+            simplifyCFG();
+
+            if (m_changedCFG) {
+                m_proc.resetReachability();
+                m_proc.invalidateCFG();
+                m_changed = true;
+            }
+
+            // We definitely want to do DCE before we do CSE so that we don't hoist things. For
+            // example:
+            //
+            // @dead = Mul(@a, @b)
+            // ... lots of control flow and stuff
+            // @thing = Mul(@a, @b)
+            //
+            // If we do CSE before DCE, we will remove @thing and keep @dead. Effectively, we will
+            // "hoist" @thing. On the other hand, if we run DCE before CSE, we will kill @dead and
+            // keep @thing. That's better, since we usually want things to stay wherever the client
+            // put them. We're not actually smart enough to move things around at random.
+            killDeadCode();
+            
+            simplifySSA();
+            
+            m_proc.resetValueOwners();
+            m_dominators = &m_proc.dominators(); // Recompute if necessary.
+            m_pureCSE.clear();
+
+            for (BasicBlock* block : m_proc.blocksInPreOrder()) {
+                m_block = block;
+                
+                for (m_index = 0; m_index < block->size(); ++m_index) {
+                    if (verbose) {
+                        dataLog(
+                            "Looking at ", *block, " #", m_index, ": ",
+                            deepDump(m_proc, block->at(m_index)), "\n");
+                    }
+                    m_value = m_block->at(m_index);
+                    m_value->performSubstitution();
+                    
+                    reduceValueStrength();
+                    replaceIfRedundant();
+                }
+                m_insertionSet.execute(m_block);
+            }
+
+            m_changedCFG |= m_blockInsertionSet.execute();
+            if (m_changedCFG) {
+                m_proc.resetReachability();
+                m_proc.invalidateCFG();
+                m_dominators = nullptr; // Dominators are not valid anymore, and we don't need them yet.
+                m_changed = true;
+            }
+            
+            result |= m_changed;
+        } while (m_changed);
+        return result;
+    }
+    
+private:
+    void reduceValueStrength()
+    {
+        switch (m_value->opcode()) {
+        case Add:
+            handleCommutativity();
+            
+            if (m_value->child(0)->opcode() == Add && isInt(m_value->type())) {
+                // Turn this: Add(Add(value, constant1), constant2)
+                // Into this: Add(value, constant1 + constant2)
+                Value* newSum = m_value->child(1)->addConstant(m_proc, m_value->child(0)->child(1));
+                if (newSum) {
+                    m_insertionSet.insertValue(m_index, newSum);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newSum;
+                    m_changed = true;
+                    break;
+                }
+                
+                // Turn this: Add(Add(value, constant), otherValue)
+                // Into this: Add(Add(value, otherValue), constant)
+                if (!m_value->child(1)->hasInt() && m_value->child(0)->child(1)->hasInt()) {
+                    Value* value = m_value->child(0)->child(0);
+                    Value* constant = m_value->child(0)->child(1);
+                    Value* otherValue = m_value->child(1);
+                    // This could create duplicate code if Add(value, constant) is used elsewhere.
+                    // However, we already model adding a constant as if it was free in other places
+                    // so let's just roll with it. The alternative would mean having to do good use
+                    // counts, which reduceStrength() currently doesn't have.
+                    m_value->child(0) =
+                        m_insertionSet.insert<Value>(
+                            m_index, Add, m_value->origin(), value, otherValue);
+                    m_value->child(1) = constant;
+                    m_changed = true;
+                    break;
+                }
+            }
+            
+            // Turn this: Add(otherValue, Add(value, constant))
+            // Into this: Add(Add(value, otherValue), constant)
+            if (isInt(m_value->type())
+                && !m_value->child(0)->hasInt()
+                && m_value->child(1)->opcode() == Add
+                && m_value->child(1)->child(1)->hasInt()) {
+                Value* value = m_value->child(1)->child(0);
+                Value* constant = m_value->child(1)->child(1);
+                Value* otherValue = m_value->child(0);
+                // This creates a duplicate add. That's dangerous but probably fine, see above.
+                m_value->child(0) =
+                    m_insertionSet.insert<Value>(
+                        m_index, Add, m_value->origin(), value, otherValue);
+                m_value->child(1) = constant;
+                m_changed = true;
+                break;
+            }
+            
+            // Turn this: Add(constant1, constant2)
+            // Into this: constant1 + constant2
+            if (Value* constantAdd = m_value->child(0)->addConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantAdd);
+                break;
+            }
+
+            // Turn this: Integer Add(value, value)
+            // Into this: Shl(value, 1)
+            // This is a useful canonicalization. It's not meant to be a strength reduction.
+            if (m_value->isInteger() && m_value->child(0) == m_value->child(1)) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(
+                        Shl, m_value->origin(), m_value->child(0),
+                        m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), 1)));
+                break;
+            }
+
+            // Turn this: Add(value, zero)
+            // Into an Identity.
+            //
+            // Addition is subtle with doubles. Zero is not the neutral value, negative zero is:
+            //    0 + 0 = 0
+            //    0 + -0 = 0
+            //    -0 + 0 = 0
+            //    -0 + -0 = -0
+            if (m_value->child(1)->isInt(0) || m_value->child(1)->isNegativeZero()) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: Integer Add(Sub(0, value), -1)
+            // Into this: BitXor(value, -1)
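+            // (This holds because, in two's complement, -value - 1 == ~value, and XOR with all-ones is ~.)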
+            if (m_value->isInteger()
+                && m_value->child(0)->opcode() == Sub
+                && m_value->child(1)->isInt(-1)
+                && m_value->child(0)->child(0)->isInt(0)) {
+                replaceWithNewValue(m_proc.add<Value>(BitXor, m_value->origin(), m_value->child(0)->child(1), m_value->child(1)));
+                break;
+            }
+
+            break;
+
+        case Sub:
+            // Turn this: Sub(constant1, constant2)
+            // Into this: constant1 - constant2
+            if (Value* constantSub = m_value->child(0)->subConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantSub);
+                break;
+            }
+
+            if (isInt(m_value->type())) {
+                // Turn this: Sub(value, constant)
+                // Into this: Add(value, -constant)
+                if (Value* negatedConstant = m_value->child(1)->negConstant(m_proc)) {
+                    m_insertionSet.insertValue(m_index, negatedConstant);
+                    replaceWithNew<Value>(
+                        Add, m_value->origin(), m_value->child(0), negatedConstant);
+                    break;
+                }
+                
+                // Turn this: Sub(0, value)
+                // Into this: Neg(value)
+                if (m_value->child(0)->isInt(0)) {
+                    replaceWithNew<Value>(Neg, m_value->origin(), m_value->child(1));
+                    break;
+                }
+            }
+
+            break;
+
+        case Neg:
+            // Turn this: Neg(constant)
+            // Into this: -constant
+            if (Value* constant = m_value->child(0)->negConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            
+            // Turn this: Neg(Neg(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == Neg) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+            
+            break;
+
+        case Mul:
+            handleCommutativity();
+
+            // Turn this: Mul(constant1, constant2)
+            // Into this: constant1 * constant2
+            if (Value* value = m_value->child(0)->mulConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(value);
+                break;
+            }
+
+            if (m_value->child(1)->hasInt()) {
+                int64_t factor = m_value->child(1)->asInt();
+
+                // Turn this: Mul(value, 0)
+                // Into this: 0
+                // Note that we don't do this for doubles because that's wrong. For example, -1 * 0
+                // and 1 * 0 yield different results.
+                if (!factor) {
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+                }
+
+                // Turn this: Mul(value, 1)
+                // Into this: value
+                if (factor == 1) {
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                }
+
+                // Turn this: Mul(value, -1)
+                // Into this: Sub(0, value)
+                if (factor == -1) {
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            Sub, m_value->origin(),
+                            m_insertionSet.insertIntConstant(m_index, m_value, 0),
+                            m_value->child(0)));
+                    break;
+                }
+                
+                // Turn this: Mul(value, constant)
+                // Into this: Shl(value, log2(constant))
+                if (hasOneBitSet(factor)) {
+                    unsigned shiftAmount = WTF::fastLog2(static_cast<uint64_t>(factor));
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            Shl, m_value->origin(), m_value->child(0),
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), shiftAmount)));
+                    break;
+                }
+            } else if (m_value->child(1)->hasDouble()) {
+                double factor = m_value->child(1)->asDouble();
+
+                // Turn this: Mul(value, 1)
+                // Into this: value
+                if (factor == 1) {
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                }
+            }
+
+            break;
+
+        case Div:
+            // Turn this: Div(constant1, constant2)
+            // Into this: constant1 / constant2
+            // Note that this uses Div<Chill> semantics. That's fine, because the rules for Div
+            // are strictly weaker: it has corner cases where it's allowed to do anything it
+            // likes.
+            if (replaceWithNewValue(m_value->child(0)->divConstant(m_proc, m_value->child(1))))
+                break;
+
+            if (m_value->child(1)->hasInt()) {
+                switch (m_value->child(1)->asInt()) {
+                case -1:
+                    // Turn this: Div(value, -1)
+                    // Into this: Neg(value)
+                    replaceWithNewValue(
+                        m_proc.add<Value>(Neg, m_value->origin(), m_value->child(0)));
+                    break;
+
+                case 0:
+                    // Turn this: Div(value, 0)
+                    // Into this: 0
+                    // We can do this because it's precisely correct for ChillDiv and for Div we
+                    // are allowed to do whatever we want.
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+
+                case 1:
+                    // Turn this: Div(value, 1)
+                    // Into this: value
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+
+                default:
+                    // Perform super comprehensive strength reduction of division. Currently we
+                    // only do this for 32-bit divisions, since we need a high multiply
+                    // operation. We emulate it using 64-bit multiply. We can't emulate 64-bit
+                    // high multiply with a 128-bit multiply because we don't have a 128-bit
+                    // multiply. We could do it with a patchpoint if we cared badly enough.
+
+                    if (m_value->type() != Int32)
+                        break;
+
+                    int32_t divisor = m_value->child(1)->asInt32();
+                    DivisionMagic<int32_t> magic = computeDivisionMagic(divisor);
+
+                    // Perform the "high" multiplication. We do it just to get the high bits.
+                    // This is sort of like multiplying by the reciprocal, just more gnarly. It's
+                    // from Hacker's Delight and I don't claim to understand it.
+                    Value* magicQuotient = m_insertionSet.insert<Value>(
+                        m_index, Trunc, m_value->origin(),
+                        m_insertionSet.insert<Value>(
+                            m_index, ZShr, m_value->origin(),
+                            m_insertionSet.insert<Value>(
+                                m_index, Mul, m_value->origin(),
+                                m_insertionSet.insert<Value>(
+                                    m_index, SExt32, m_value->origin(), m_value->child(0)),
+                                m_insertionSet.insert<Const64Value>(
+                                    m_index, m_value->origin(), magic.magicMultiplier)),
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), 32)));
+
+                    if (divisor > 0 && magic.magicMultiplier < 0) {
+                        magicQuotient = m_insertionSet.insert<Value>(
+                            m_index, Add, m_value->origin(), magicQuotient, m_value->child(0));
+                    }
+                    if (divisor < 0 && magic.magicMultiplier > 0) {
+                        magicQuotient = m_insertionSet.insert<Value>(
+                            m_index, Sub, m_value->origin(), magicQuotient, m_value->child(0));
+                    }
+                    if (magic.shift > 0) {
+                        magicQuotient = m_insertionSet.insert<Value>(
+                            m_index, SShr, m_value->origin(), magicQuotient,
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), magic.shift));
+                    }
+                    replaceWithIdentity(
+                        m_insertionSet.insert<Value>(
+                            m_index, Add, m_value->origin(), magicQuotient,
+                            m_insertionSet.insert<Value>(
+                                m_index, ZShr, m_value->origin(), magicQuotient,
+                                m_insertionSet.insert<Const32Value>(
+                                    m_index, m_value->origin(), 31))));
+                    break;
+                }
+                break;
+            }
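+            // Rough intuition for the magic-number path above (illustrative, not taken from this
+            // patch): for Div(n, 3) the multiplier is essentially a fixed-point approximation of
+            // 1/3 scaled by 2^32, so the emitted sequence is a 64-bit multiply, a shift to take the
+            // high half, an optional signed shift, and an add of ZShr(q, 31) to round toward zero
+            // for negative quotients - all cheaper than an integer divide on most targets.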
+            break;
+
+        case UDiv:
+            // Turn this: UDiv(constant1, constant2)
+            // Into this: constant1 / constant2
+            if (replaceWithNewValue(m_value->child(0)->uDivConstant(m_proc, m_value->child(1))))
+                break;
+
+            if (m_value->child(1)->hasInt()) {
+                switch (m_value->child(1)->asInt()) {
+                case 0:
+                    // Turn this: UDiv(value, 0)
+                    // Into this: 0
+                    // We can do whatever we want here so we might as well do the chill thing,
+                    // in case we add chill versions of UDiv in the future.
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+
+                case 1:
+                    // Turn this: UDiv(value, 1)
+                    // Into this: value
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                default:
+                    // FIXME: We should do comprehensive strength reduction for unsigned numbers. Likely,
+                    // we will just want to copy what LLVM does. https://bugs.webkit.org/show_bug.cgi?id=164809
+                    break;
+                }
+            }
+            break;
+
+        case Mod:
+            // Turn this: Mod(constant1, constant2)
+            // Into this: constant1 % constant2
+            // Note that this uses Mod<Chill> semantics.
+            if (replaceWithNewValue(m_value->child(0)->modConstant(m_proc, m_value->child(1))))
+                break;
+
+            // Modulo by constant is more efficient if we turn it into Div, and then let Div get
+            // optimized.
+            if (m_value->child(1)->hasInt()) {
+                switch (m_value->child(1)->asInt()) {
+                case 0:
+                    // Turn this: Mod(value, 0)
+                    // Into this: 0
+                    // This is correct according to ChillMod semantics.
+                    replaceWithIdentity(m_value->child(1));
+                    break;
+
+                default:
+                    // Turn this: Mod(N, D)
+                    // Into this: Sub(N, Mul(Div(N, D), D))
+                    //
+                    // This is a speed-up because we use our existing Div optimizations.
+                    //
+                    // Here's an easier way to look at it:
+                    //     N % D = N - N / D * D
+                    //
+                    // Note that this does not work for D = 0 and ChillMod. The expected result is 0.
+                    // That's why we have a special-case above.
+                    //     X % 0 = X - X / 0 * 0 = X     (should be 0)
+                    //
+                    // This does work for the D = -1 special case.
+                    //     -2^31 % -1 = -2^31 - -2^31 / -1 * -1
+                    //                = -2^31 - -2^31 * -1
+                    //                = -2^31 - -2^31
+                    //                = 0
+
+                    Kind divKind = Div;
+                    divKind.setIsChill(m_value->isChill());
+
+                    replaceWithIdentity(
+                        m_insertionSet.insert<Value>(
+                            m_index, Sub, m_value->origin(),
+                            m_value->child(0),
+                            m_insertionSet.insert<Value>(
+                                m_index, Mul, m_value->origin(),
+                                m_insertionSet.insert<Value>(
+                                    m_index, divKind, m_value->origin(),
+                                    m_value->child(0), m_value->child(1)),
+                                m_value->child(1))));
+                    break;
+                }
+                break;
+            }
+            
+            break;
+
+        case UMod:
+            // Turn this: UMod(constant1, constant2)
+            // Into this: constant1 % constant2
+            replaceWithNewValue(m_value->child(0)->uModConstant(m_proc, m_value->child(1)));
+            // FIXME: We should do what we do for Mod since the same principle applies here.
+            // https://bugs.webkit.org/show_bug.cgi?id=164809
+            break;
+
+        case BitAnd:
+            handleCommutativity();
+
+            // Turn this: BitAnd(constant1, constant2)
+            // Into this: constant1 & constant2
+            if (Value* constantBitAnd = m_value->child(0)->bitAndConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantBitAnd);
+                break;
+            }
+
+            // Turn this: BitAnd(BitAnd(value, constant1), constant2)
+            // Into this: BitAnd(value, constant1 & constant2).
+            if (m_value->child(0)->opcode() == BitAnd) {
+                Value* newConstant = m_value->child(1)->bitAndConstant(m_proc, m_value->child(0)->child(1));
+                if (newConstant) {
+                    m_insertionSet.insertValue(m_index, newConstant);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newConstant;
+                    m_changed = true;
+                }
+            }
+
+            // Turn this: BitAnd(valueX, valueX)
+            // Into this: valueX.
+            if (m_value->child(0) == m_value->child(1)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitAnd(value, zero-constant)
+            // Into this: zero-constant.
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(1));
+                break;
+            }
+
+            // Turn this: BitAnd(value, all-ones)
+            // Into this: value.
+            if ((m_value->type() == Int64 && m_value->child(1)->isInt(0xffffffffffffffff))
+                || (m_value->type() == Int32 && m_value->child(1)->isInt(0xffffffff))) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitAnd(64-bit value, 32 ones)
+            // Into this: ZExt32(Trunc(64-bit value))
+            if (m_value->child(1)->isInt64(0xffffffffllu)) {
+                Value* newValue = m_insertionSet.insert<Value>(
+                    m_index, ZExt32, m_value->origin(),
+                    m_insertionSet.insert<Value>(m_index, Trunc, m_value->origin(), m_value->child(0)));
+                replaceWithIdentity(newValue);
+                break;
+            }
+
+            // Turn this: BitAnd(SExt8(value), mask) where (mask & 0xffffff00) == 0
+            // Into this: BitAnd(value, mask)
+            if (m_value->child(0)->opcode() == SExt8 && m_value->child(1)->hasInt32()
+                && !(m_value->child(1)->asInt32() & 0xffffff00)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: BitAnd(SExt16(value), mask) where (mask & 0xffff0000) == 0
+            // Into this: BitAnd(value, mask)
+            if (m_value->child(0)->opcode() == SExt16 && m_value->child(1)->hasInt32()
+                && !(m_value->child(1)->asInt32() & 0xffff0000)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: BitAnd(SExt32(value), mask) where (mask & 0xffffffff00000000) == 0
+            // Into this: BitAnd(ZExt32(value), mask)
+            if (m_value->child(0)->opcode() == SExt32 && m_value->child(1)->hasInt32()
+                && !(m_value->child(1)->asInt32() & 0xffffffff00000000llu)) {
+                m_value->child(0) = m_insertionSet.insert<Value>(
+                    m_index, ZExt32, m_value->origin(),
+                    m_value->child(0)->child(0), m_value->child(0)->child(1));
+                m_changed = true;
+            }
+
+            // Turn this: BitAnd(Op(value, constant1), constant2)
+            //     where !(constant1 & constant2)
+            //       and Op is BitOr or BitXor
+            // into this: BitAnd(value, constant2)
+            if (m_value->child(1)->hasInt()) {
+                int64_t constant2 = m_value->child(1)->asInt();
+                switch (m_value->child(0)->opcode()) {
+                case BitOr:
+                case BitXor:
+                    if (m_value->child(0)->child(1)->hasInt()
+                        && !(m_value->child(0)->child(1)->asInt() & constant2)) {
+                        m_value->child(0) = m_value->child(0)->child(0);
+                        m_changed = true;
+                        break;
+                    }
+                    break;
+                default:
+                    break;
+                }
+            }
+            break;
+
+        case BitOr:
+            handleCommutativity();
+
+            // Turn this: BitOr(constant1, constant2)
+            // Into this: constant1 | constant2
+            if (Value* constantBitOr = m_value->child(0)->bitOrConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantBitOr);
+                break;
+            }
+
+            // Turn this: BitOr(BitOr(value, constant1), constant2)
+            // Into this: BitOr(value, constant1 & constant2).
+            if (m_value->child(0)->opcode() == BitOr) {
+                Value* newConstant = m_value->child(1)->bitOrConstant(m_proc, m_value->child(0)->child(1));
+                if (newConstant) {
+                    m_insertionSet.insertValue(m_index, newConstant);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newConstant;
+                    m_changed = true;
+                }
+            }
+
+            // Turn this: BitOr(valueX, valueX)
+            // Into this: valueX.
+            if (m_value->child(0) == m_value->child(1)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitOr(value, zero-constant)
+            // Into this: value.
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: BitOr(value, all-ones)
+            // Into this: all-ones.
+            if ((m_value->type() == Int64 && m_value->child(1)->isInt(0xffffffffffffffff))
+                || (m_value->type() == Int32 && m_value->child(1)->isInt(0xffffffff))) {
+                replaceWithIdentity(m_value->child(1));
+                break;
+            }
+
+            break;
+
+        case BitXor:
+            handleCommutativity();
+
+            // Turn this: BitXor(constant1, constant2)
+            // Into this: constant1 ^ constant2
+            if (Value* constantBitXor = m_value->child(0)->bitXorConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constantBitXor);
+                break;
+            }
+
+            // Turn this: BitXor(BitXor(value, constant1), constant2)
+            // Into this: BitXor(value, constant1 ^ constant2).
+            if (m_value->child(0)->opcode() == BitXor) {
+                Value* newConstant = m_value->child(1)->bitXorConstant(m_proc, m_value->child(0)->child(1));
+                if (newConstant) {
+                    m_insertionSet.insertValue(m_index, newConstant);
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_value->child(1) = newConstant;
+                    m_changed = true;
+                }
+            }
+
+            // Turn this: BitXor(compare, 1)
+            // Into this: invertedCompare
+            if (m_value->child(1)->isInt32(1)) {
+                if (Value* invertedCompare = m_value->child(0)->invertedCompare(m_proc)) {
+                    replaceWithNewValue(invertedCompare);
+                    break;
+                }
+            }
+
+            // Turn this: BitXor(valueX, valueX)
+            // Into this: zero-constant.
+            if (m_value->child(0) == m_value->child(1)) {
+                replaceWithNewValue(m_proc.addIntConstant(m_value, 0));
+                break;
+            }
+
+            // Turn this: BitXor(value, zero-constant)
+            // Into this: value.
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            break;
+
+        case Shl:
+            // Turn this: Shl(constant1, constant2)
+            // Into this: constant1 << constant2
+            if (Value* constant = m_value->child(0)->shlConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case SShr:
+            // Turn this: SShr(constant1, constant2)
+            // Into this: constant1 >> constant2
+            if (Value* constant = m_value->child(0)->sShrConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            if (m_value->child(1)->hasInt32()
+                && m_value->child(0)->opcode() == Shl
+                && m_value->child(0)->child(1)->hasInt32()
+                && m_value->child(1)->asInt32() == m_value->child(0)->child(1)->asInt32()) {
+                switch (m_value->child(1)->asInt32()) {
+                case 16:
+                    if (m_value->type() == Int32) {
+                        // Turn this: SShr(Shl(value, 16), 16)
+                        // Into this: SExt16(value)
+                        replaceWithNewValue(
+                            m_proc.add<Value>(
+                                SExt16, m_value->origin(), m_value->child(0)->child(0)));
+                    }
+                    break;
+
+                case 24:
+                    if (m_value->type() == Int32) {
+                        // Turn this: SShr(Shl(value, 24), 24)
+                        // Into this: SExt8(value)
+                        replaceWithNewValue(
+                            m_proc.add<Value>(
+                                SExt8, m_value->origin(), m_value->child(0)->child(0)));
+                    }
+                    break;
+
+                case 32:
+                    if (m_value->type() == Int64) {
+                        // Turn this: SShr(Shl(value, 32), 32)
+                        // Into this: SExt32(Trunc(value))
+                        replaceWithNewValue(
+                            m_proc.add<Value>(
+                                SExt32, m_value->origin(),
+                                m_insertionSet.insert<Value>(
+                                    m_index, Trunc, m_value->origin(),
+                                    m_value->child(0)->child(0))));
+                    }
+                    break;
+
+                // FIXME: Add cases for 48 and 56, but that would translate to SExt32(SExt8) or
+                // SExt32(SExt16), which we don't currently lower efficiently.
+
+                default:
+                    break;
+                }
+
+                if (m_value->opcode() != SShr)
+                    break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case ZShr:
+            // Turn this: ZShr(constant1, constant2)
+            // Into this: (unsigned)constant1 >> constant2
+            if (Value* constant = m_value->child(0)->zShrConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case RotR:
+            // Turn this: RotR(constant1, constant2)
+            // Into this: (constant1 >> constant2) | (constant1 << sizeof(constant1) * 8 - constant2)
+            if (Value* constant = m_value->child(0)->rotRConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case RotL:
+            // Turn this: RotL(constant1, constant2)
+            // Into this: (constant1 << constant2) | (constant1 >> sizeof(constant1) * 8 - constant2)
+            if (Value* constant = m_value->child(0)->rotLConstant(m_proc, m_value->child(1))) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            handleShiftAmount();
+            break;
+
+        case Abs:
+            // Turn this: Abs(constant)
+            // Into this: fabs<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->absConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: Abs(Abs(value))
+            // Into this: Abs(value)
+            if (m_value->child(0)->opcode() == Abs) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: Abs(BitwiseCast(value))
+            // Into this: BitwiseCast(And(value, mask-top-bit))
+            if (m_value->child(0)->opcode() == BitwiseCast) {
+                Value* mask;
+                if (m_value->type() == Double)
+                    mask = m_insertionSet.insert<Const64Value>(m_index, m_value->origin(), ~(1ll << 63));
+                else
+                    mask = m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), ~(1l << 31));
+
+                Value* bitAnd = m_insertionSet.insert<Value>(m_index, BitAnd, m_value->origin(),
+                    m_value->child(0)->child(0),
+                    mask);
+                Value* cast = m_insertionSet.insert<Value>(m_index, BitwiseCast, m_value->origin(), bitAnd);
+                replaceWithIdentity(cast);
+                break;
+            }
+            break;
+
+        case Ceil:
+            // Turn this: Ceil(constant)
+            // Into this: ceil<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->ceilConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: Ceil(roundedValue)
+            // Into this: roundedValue
+            if (m_value->child(0)->isRounded()) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+            break;
+
+        case Floor:
+            // Turn this: Floor(constant)
+            // Into this: floor<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->floorConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: Floor(roundedValue)
+            // Into this: roundedValue
+            if (m_value->child(0)->isRounded()) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+            break;
+
+        case Sqrt:
+            // Turn this: Sqrt(constant)
+            // Into this: sqrt<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->sqrtConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case BitwiseCast:
+            // Turn this: BitwiseCast(constant)
+            // Into this: bitwise_cast<value->type()>(constant)
+            if (Value* constant = m_value->child(0)->bitwiseCastConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+
+            // Turn this: BitwiseCast(BitwiseCast(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == BitwiseCast) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+            break;
+
+        case SExt8:
+            // Turn this: SExt8(constant)
+            // Into this: static_cast<int8_t>(constant)
+            if (m_value->child(0)->hasInt32()) {
+                int32_t result = static_cast<int8_t>(m_value->child(0)->asInt32());
+                replaceWithNewValue(m_proc.addIntConstant(m_value, result));
+                break;
+            }
+
+            // Turn this: SExt8(SExt8(value))
+            //   or this: SExt8(SExt16(value))
+            // Into this: SExt8(value)
+            if (m_value->child(0)->opcode() == SExt8 || m_value->child(0)->opcode() == SExt16) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()) {
+                Value* input = m_value->child(0)->child(0);
+                int32_t mask = m_value->child(0)->child(1)->asInt32();
+                
+                // Turn this: SExt8(BitAnd(input, mask)) where (mask & 0xff) == 0xff
+                // Into this: SExt8(input)
+                if ((mask & 0xff) == 0xff) {
+                    m_value->child(0) = input;
+                    m_changed = true;
+                    break;
+                }
+                
+                // Turn this: SExt8(BitAnd(input, mask)) where (mask & 0x80) == 0
+                // Into this: BitAnd(input, const & 0x7f)
+                if (!(mask & 0x80)) {
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            BitAnd, m_value->origin(), input,
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), mask & 0x7f)));
+                    break;
+                }
+            }
+            break;
+
+        case SExt16:
+            // Turn this: SExt16(constant)
+            // Into this: static_cast<int16_t>(constant)
+            if (m_value->child(0)->hasInt32()) {
+                int32_t result = static_cast<int16_t>(m_value->child(0)->asInt32());
+                replaceWithNewValue(m_proc.addIntConstant(m_value, result));
+                break;
+            }
+
+            // Turn this: SExt16(SExt16(value))
+            // Into this: SExt16(value)
+            if (m_value->child(0)->opcode() == SExt16) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: SExt16(SExt8(value))
+            // Into this: SExt8(value)
+            if (m_value->child(0)->opcode() == SExt8) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()) {
+                Value* input = m_value->child(0)->child(0);
+                int32_t mask = m_value->child(0)->child(1)->asInt32();
+                
+                // Turn this: SExt16(BitAnd(input, mask)) where (mask & 0xffff) == 0xffff
+                // Into this: SExt16(input)
+                if ((mask & 0xffff) == 0xffff) {
+                    m_value->child(0) = input;
+                    m_changed = true;
+                    break;
+                }
+                
+                // Turn this: SExt16(BitAnd(input, mask)) where (mask & 0x8000) == 0
+                // Into this: BitAnd(input, const & 0x7fff)
+                if (!(mask & 0x8000)) {
+                    replaceWithNewValue(
+                        m_proc.add<Value>(
+                            BitAnd, m_value->origin(), input,
+                            m_insertionSet.insert<Const32Value>(
+                                m_index, m_value->origin(), mask & 0x7fff)));
+                    break;
+                }
+            }
+            break;
+
+        case SExt32:
+            // Turn this: SExt32(constant)
+            // Into this: static_cast<int64_t>(constant)
+            if (m_value->child(0)->hasInt32()) {
+                replaceWithNewValue(m_proc.addIntConstant(m_value, m_value->child(0)->asInt32()));
+                break;
+            }
+
+            // Turn this: SExt32(BitAnd(input, mask)) where (mask & 0x80000000) == 0
+            // Into this: ZExt32(BitAnd(input, mask))
+            if (m_value->child(0)->opcode() == BitAnd && m_value->child(0)->child(1)->hasInt32()
+                && !(m_value->child(0)->child(1)->asInt32() & 0x80000000)) {
+                replaceWithNewValue(
+                    m_proc.add<Value>(
+                        ZExt32, m_value->origin(), m_value->child(0)));
+                break;
+            }
+            break;
+
+        case ZExt32:
+            // Turn this: ZExt32(constant)
+            // Into this: static_cast<uint64_t>(static_cast<uint32_t>(constant))
+            if (m_value->child(0)->hasInt32()) {
+                replaceWithNewValue(
+                    m_proc.addIntConstant(
+                        m_value,
+                        static_cast<uint64_t>(static_cast<uint32_t>(m_value->child(0)->asInt32()))));
+                break;
+            }
+            break;
+
+        case Trunc:
+            // Turn this: Trunc(constant)
+            // Into this: static_cast<int32_t>(constant)
+            if (m_value->child(0)->hasInt64() || m_value->child(0)->hasDouble()) {
+                replaceWithNewValue(
+                    m_proc.addIntConstant(m_value, static_cast<int32_t>(m_value->child(0)->asInt64())));
+                break;
+            }
+
+            // Turn this: Trunc(SExt32(value)) or Trunc(ZExt32(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == SExt32 || m_value->child(0)->opcode() == ZExt32) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+
+            // Turn this: Trunc(Op(value, constant))
+            //     where !(constant & 0xffffffff)
+            //       and Op is Add, Sub, BitOr, or BitXor
+            // into this: Trunc(value)
+            switch (m_value->child(0)->opcode()) {
+            case Add:
+            case Sub:
+            case BitOr:
+            case BitXor:
+                if (m_value->child(0)->child(1)->hasInt64()
+                    && !(m_value->child(0)->child(1)->asInt64() & 0xffffffffll)) {
+                    m_value->child(0) = m_value->child(0)->child(0);
+                    m_changed = true;
+                    break;
+                }
+                break;
+            default:
+                break;
+            }
+            break;
+
+        case IToD:
+            // Turn this: IToD(constant)
+            // Into this: ConstDouble(constant)
+            if (Value* constant = m_value->child(0)->iToDConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case IToF:
+            // Turn this: IToF(constant)
+            // Into this: ConstFloat(constant)
+            if (Value* constant = m_value->child(0)->iToFConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case FloatToDouble:
+            // Turn this: FloatToDouble(constant)
+            // Into this: ConstDouble(constant)
+            if (Value* constant = m_value->child(0)->floatToDoubleConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case DoubleToFloat:
+            // Turn this: DoubleToFloat(FloatToDouble(value))
+            // Into this: value
+            if (m_value->child(0)->opcode() == FloatToDouble) {
+                replaceWithIdentity(m_value->child(0)->child(0));
+                break;
+            }
+
+            // Turn this: DoubleToFloat(constant)
+            // Into this: ConstFloat(constant)
+            if (Value* constant = m_value->child(0)->doubleToFloatConstant(m_proc)) {
+                replaceWithNewValue(constant);
+                break;
+            }
+            break;
+
+        case Select:
+            // Turn this: Select(constant, a, b)
+            // Into this: constant ? a : b
+            if (m_value->child(0)->hasInt32()) {
+                replaceWithIdentity(
+                    m_value->child(0)->asInt32() ? m_value->child(1) : m_value->child(2));
+                break;
+            }
+
+            // Turn this: Select(Equal(x, 0), a, b)
+            // Into this: Select(x, b, a)
+            if (m_value->child(0)->opcode() == Equal && m_value->child(0)->child(1)->isInt(0)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_value->child(1), m_value->child(2));
+                m_changed = true;
+                break;
+            }
+
+            // Turn this: Select(BitXor(bool, 1), a, b)
+            // Into this: Select(bool, b, a)
+            if (m_value->child(0)->opcode() == BitXor
+                && m_value->child(0)->child(1)->isInt32(1)
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_value->child(1), m_value->child(2));
+                m_changed = true;
+                break;
+            }
+
+            // Turn this: Select(BitAnd(bool, xyz1), a, b)
+            // Into this: Select(bool, a, b)
+            if (m_value->child(0)->opcode() == BitAnd
+                && m_value->child(0)->child(1)->hasInt()
+                && m_value->child(0)->child(1)->asInt() & 1
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+                break;
+            }
+
+            // Turn this: Select(stuff, x, x)
+            // Into this: x
+            if (m_value->child(1) == m_value->child(2)) {
+                replaceWithIdentity(m_value->child(1));
+                break;
+            }
+            break;
+
+        case Load8Z:
+        case Load8S:
+        case Load16Z:
+        case Load16S:
+        case Load:
+        case Store8:
+        case Store16:
+        case Store: {
+            Value* address = m_value->lastChild();
+            MemoryValue* memory = m_value->as<MemoryValue>();
+
+            // Turn this: Load(Add(address, offset1), offset = offset2)
+            // Into this: Load(address, offset = offset1 + offset2)
+            //
+            // Also turns this: Store(value, Add(address, offset1), offset = offset2)
+            // Into this: Store(value, address, offset = offset1 + offset2)
+            if (address->opcode() == Add && address->child(1)->hasIntPtr()) {
+                intptr_t offset = address->child(1)->asIntPtr();
+                if (!sumOverflows<intptr_t>(offset, memory->offset())) {
+                    offset += memory->offset();
+                    int32_t smallOffset = static_cast<int32_t>(offset);
+                    if (smallOffset == offset) {
+                        address = address->child(0);
+                        memory->lastChild() = address;
+                        memory->setOffset(smallOffset);
+                        m_changed = true;
+                    }
+                }
+            }
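+
+            // Worked example (illustrative only): given
+            //     @p = Add(@base, $8)
+            //     Load(@p, offset = 4)
+            // the load becomes Load(@base, offset = 12), provided the combined offset still fits in
+            // the 32-bit offset field.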
+
+            // Turn this: Load(constant1, offset = constant2)
+            // Into this: Load(constant1 + constant2)
+            //
+            // This is a fun canonicalization. It purely regresses naively generated code. We rely
+            // on constant materialization to be smart enough to materialize this constant the smart
+            // way. We want this canonicalization because we want to know if two memory accesses see
+            // the same address.
+            if (memory->offset()) {
+                if (Value* newAddress = address->addConstant(m_proc, memory->offset())) {
+                    m_insertionSet.insertValue(m_index, newAddress);
+                    address = newAddress;
+                    memory->lastChild() = newAddress;
+                    memory->setOffset(0);
+                    m_changed = true;
+                }
+            }
+            
+            break;
+        }
+
+        case CCall: {
+            // Turn this: Call(fmod, constant1, constant2)
+            // Into this: fcall-constant(constant1, constant2)
+            double(*fmodDouble)(double, double) = fmod;
+            if (m_value->type() == Double
+                && m_value->numChildren() == 3
+                && m_value->child(0)->isIntPtr(reinterpret_cast<intptr_t>(fmodDouble))
+                && m_value->child(1)->type() == Double
+                && m_value->child(2)->type() == Double) {
+                replaceWithNewValue(m_value->child(1)->modConstant(m_proc, m_value->child(2)));
+            }
+            break;
+        }
+        case Equal:
+            handleCommutativity();
+
+            // Turn this: Equal(bool, 0)
+            // Into this: BitXor(bool, 1)
+            if (m_value->child(0)->returnsBool() && m_value->child(1)->isInt32(0)) {
+                replaceWithNew<Value>(
+                    BitXor, m_value->origin(), m_value->child(0),
+                    m_insertionSet.insert<Const32Value>(m_index, m_value->origin(), 1));
+                break;
+            }
+            
+            // Turn this: Equal(bool, 1)
+            // Into this: bool
+            if (m_value->child(0)->returnsBool() && m_value->child(1)->isInt32(1)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            // Turn this: Equal(const1, const2)
+            // Into this: const1 == const2
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->equalConstant(m_value->child(1))));
+            break;
+            
+        case NotEqual:
+            handleCommutativity();
+
+            if (m_value->child(0)->returnsBool()) {
+                // Turn this: NotEqual(bool, 0)
+                // Into this: bool
+                if (m_value->child(1)->isInt32(0)) {
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                }
+                
+                // Turn this: NotEqual(bool, 1)
+                // Into this: Equal(bool, 0)
+                if (m_value->child(1)->isInt32(1)) {
+                    replaceWithNew<Value>(
+                        Equal, m_value->origin(), m_value->child(0),
+                        m_insertionSet.insertIntConstant(m_index, m_value->origin(), Int32, 0));
+                    break;
+                }
+            }
+
+            // Turn this: NotEqual(const1, const2)
+            // Into this: const1 != const2
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->notEqualConstant(m_value->child(1))));
+            break;
+
+        case LessThan:
+            // FIXME: We could do a better job of canonicalizing integer comparisons.
+            // https://bugs.webkit.org/show_bug.cgi?id=150958
+
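+            // Turn this: LessThan(const1, const2)
+            // Into this: const1 < const2
+            //
+            // The same constant folding applies to the other comparison opcodes below; when the
+            // result cannot be computed, replaceWithNewValue() is passed null and nothing changes.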
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->lessThanConstant(m_value->child(1))));
+            break;
+
+        case GreaterThan:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->greaterThanConstant(m_value->child(1))));
+            break;
+
+        case LessEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->lessEqualConstant(m_value->child(1))));
+            break;
+
+        case GreaterEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->greaterEqualConstant(m_value->child(1))));
+            break;
+
+        case Above:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->aboveConstant(m_value->child(1))));
+            break;
+
+        case Below:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->belowConstant(m_value->child(1))));
+            break;
+
+        case AboveEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->aboveEqualConstant(m_value->child(1))));
+            break;
+
+        case BelowEqual:
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(0)->belowEqualConstant(m_value->child(1))));
+            break;
+
+        case EqualOrUnordered:
+            handleCommutativity();
+
+            // Turn this: EqualOrUnordered(const1, const2)
+            // Into this: isunordered(const1, const2) || const1 == const2.
+            // Turn this: EqualOrUnordered(value, const_NaN)
+            // Into this: 1.
+            replaceWithNewValue(
+                m_proc.addBoolConstant(
+                    m_value->origin(),
+                    m_value->child(1)->equalOrUnorderedConstant(m_value->child(0))));
+            break;
+
+        case CheckAdd: {
+            if (replaceWithNewValue(m_value->child(0)->checkAddConstant(m_proc, m_value->child(1))))
+                break;
+
+            handleCommutativity();
+            
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
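+            // Turn this: CheckAdd(x, y)
+            // Into this: Add(x, y)
+            // when the value ranges of x and y prove that the addition cannot overflow.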
+            IntRange leftRange = rangeFor(m_value->child(0));
+            IntRange rightRange = rangeFor(m_value->child(1));
+            if (!leftRange.couldOverflowAdd(rightRange, m_value->type())) {
+                replaceWithNewValue(
+                    m_proc.add(Add, m_value->origin(), m_value->child(0), m_value->child(1)));
+                break;
+            }
+            break;
+        }
+
+        case CheckSub: {
+            if (replaceWithNewValue(m_value->child(0)->checkSubConstant(m_proc, m_value->child(1))))
+                break;
+
+            if (m_value->child(1)->isInt(0)) {
+                replaceWithIdentity(m_value->child(0));
+                break;
+            }
+
+            if (Value* negatedConstant = m_value->child(1)->checkNegConstant(m_proc)) {
+                m_insertionSet.insertValue(m_index, negatedConstant);
+                m_value->as<CheckValue>()->convertToAdd();
+                m_value->child(1) = negatedConstant;
+                m_changed = true;
+                break;
+            }
+
+            IntRange leftRange = rangeFor(m_value->child(0));
+            IntRange rightRange = rangeFor(m_value->child(1));
+            if (!leftRange.couldOverflowSub(rightRange, m_value->type())) {
+                replaceWithNewValue(
+                    m_proc.add(Sub, m_value->origin(), m_value->child(0), m_value->child(1)));
+                break;
+            }
+            break;
+        }
+
+        case CheckMul: {
+            if (replaceWithNewValue(m_value->child(0)->checkMulConstant(m_proc, m_value->child(1))))
+                break;
+
+            handleCommutativity();
+
+            if (m_value->child(1)->hasInt()) {
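+                // Strength-reduce multiplication by a small constant:
+                //   CheckMul(x, 0) => 0
+                //   CheckMul(x, 1) => x
+                //   CheckMul(x, 2) => CheckAdd(x, x)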
+                bool modified = true;
+                switch (m_value->child(1)->asInt()) {
+                case 0:
+                    replaceWithNewValue(m_proc.addIntConstant(m_value, 0));
+                    break;
+                case 1:
+                    replaceWithIdentity(m_value->child(0));
+                    break;
+                case 2:
+                    m_value->as<CheckValue>()->convertToAdd();
+                    m_value->child(1) = m_value->child(0);
+                    m_changed = true;
+                    break;
+                default:
+                    modified = false;
+                    break;
+                }
+                if (modified)
+                    break;
+            }
+
+            IntRange leftRange = rangeFor(m_value->child(0));
+            IntRange rightRange = rangeFor(m_value->child(1));
+            if (!leftRange.couldOverflowMul(rightRange, m_value->type())) {
+                replaceWithNewValue(
+                    m_proc.add(Mul, m_value->origin(), m_value->child(0), m_value->child(1)));
+                break;
+            }
+            break;
+        }
+
+        case Check: {
+            CheckValue* checkValue = m_value->as<CheckValue>();
+            
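+            // A Check whose condition is known to be zero can never fire, so it reduces to a Nop.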
+            if (checkValue->child(0)->isLikeZero()) {
+                checkValue->replaceWithNop();
+                m_changed = true;
+                break;
+            }
+
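+            // A Check whose condition is known to be non-zero always fires. Emit its patchpoint
+            // unconditionally and turn the rest of the block into unreachable code.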
+            if (checkValue->child(0)->isLikeNonZero()) {
+                PatchpointValue* patchpoint =
+                    m_insertionSet.insert<PatchpointValue>(m_index, Void, checkValue->origin());
+
+                patchpoint->effects = Effects();
+                patchpoint->effects.reads = HeapRange::top();
+                patchpoint->effects.exitsSideways = true;
+
+                for (unsigned i = 1; i < checkValue->numChildren(); ++i)
+                    patchpoint->append(checkValue->constrainedChild(i));
+
+                patchpoint->setGenerator(checkValue->generator());
+
+                // Replace the rest of the block with an Oops.
+                for (unsigned i = m_index + 1; i < m_block->size() - 1; ++i)
+                    m_block->at(i)->replaceWithBottom(m_insertionSet, m_index);
+                m_block->last()->replaceWithOops(m_block);
+                m_block->last()->setOrigin(checkValue->origin());
+
+                // Replace ourselves last.
+                checkValue->replaceWithNop();
+                m_changedCFG = true;
+                break;
+            }
+
+            if (checkValue->child(0)->opcode() == NotEqual
+                && checkValue->child(0)->child(1)->isInt(0)) {
+                checkValue->child(0) = checkValue->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // If we are checking some bounded-size SSA expression that leads to a Select that
+            // has a constant as one of its results, then turn the Select into a Branch and split
+            // the code between the Check and the Branch. For example, this:
+            //
+            //     @a = Select(@p, @x, 42)
+            //     @b = Add(@a, 35)
+            //     Check(@b)
+            //
+            // becomes this:
+            //
+            //     Branch(@p, #truecase, #falsecase)
+            //
+            //   BB#truecase:
+            //     @b_truecase = Add(@x, 35)
+            //     Check(@b_truecase)
+            //     Upsilon(@x, ^a)
+            //     Upsilon(@b_truecase, ^b)
+            //     Jump(#continuation)
+            //
+            //   BB#falsecase:
+            //     @b_falsecase = Add(42, 35)
+            //     Check(@b_falsecase)
+            //     Upsilon(42, ^a)
+            //     Upsilon(@b_falsecase, ^b)
+            //     Jump(#continuation)
+            //
+            //   BB#continuation:
+            //     @a = Phi()
+            //     @b = Phi()
+            //
+            // The goal of this optimization is to kill a lot of code in one of those basic
+            // blocks. This is pretty much guaranteed since one of those blocks will replace all
+            // uses of the Select with a constant, and that constant will be transitively used
+            // from the check.
+            static const unsigned selectSpecializationBound = 3;
+            Value* select = findRecentNodeMatching(
+                m_value->child(0), selectSpecializationBound,
+                [&] (Value* value) -> bool {
+                    return value->opcode() == Select
+                        && (value->child(1)->isConstant() && value->child(2)->isConstant());
+                });
+            
+            if (select) {
+                specializeSelect(select);
+                break;
+            }
+            break;
+        }
+
+        case Branch: {
+            // Turn this: Branch(NotEqual(x, 0))
+            // Into this: Branch(x)
+            if (m_value->child(0)->opcode() == NotEqual && m_value->child(0)->child(1)->isInt(0)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            // Turn this: Branch(Equal(x, 0), then, else)
+            // Into this: Branch(x, else, then)
+            if (m_value->child(0)->opcode() == Equal && m_value->child(0)->child(1)->isInt(0)) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_block->taken(), m_block->notTaken());
+                m_changed = true;
+            }
+            
+            // Turn this: Branch(BitXor(bool, 1), then, else)
+            // Into this: Branch(bool, else, then)
+            if (m_value->child(0)->opcode() == BitXor
+                && m_value->child(0)->child(1)->isInt32(1)
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                std::swap(m_block->taken(), m_block->notTaken());
+                m_changed = true;
+            }
+
+            // Turn this: Branch(BitAnd(bool, xyb1), then, else)
+            // Into this: Branch(bool, then, else)
+            if (m_value->child(0)->opcode() == BitAnd
+                && m_value->child(0)->child(1)->hasInt()
+                && m_value->child(0)->child(1)->asInt() & 1
+                && m_value->child(0)->child(0)->returnsBool()) {
+                m_value->child(0) = m_value->child(0)->child(0);
+                m_changed = true;
+            }
+
+            TriState triState = m_value->child(0)->asTriState();
+
+            // Turn this: Branch(0, then, else)
+            // Into this: Jump(else)
+            if (triState == FalseTriState) {
+                m_block->taken().block()->removePredecessor(m_block);
+                m_value->replaceWithJump(m_block, m_block->notTaken());
+                m_changedCFG = true;
+                break;
+            }
+
+            // Turn this: Branch(not 0, then, else)
+            // Into this: Jump(then)
+            if (triState == TrueTriState) {
+                m_block->notTaken().block()->removePredecessor(m_block);
+                m_value->replaceWithJump(m_block, m_block->taken());
+                m_changedCFG = true;
+                break;
+            }
+
+            // If a check for the same property dominates us, we can kill the branch. This sort
+            // of makes sense here because it's cheap, but hacks like this show that we're going
+            // to need SCCP.
+            Value* check = m_pureCSE.findMatch(
+                ValueKey(Check, Void, m_value->child(0)), m_block, *m_dominators);
+            if (check) {
+                // The Check would have side-exited if child(0) was non-zero. So, it must be
+                // zero here.
+                m_block->taken().block()->removePredecessor(m_block);
+                m_value->replaceWithJump(m_block, m_block->notTaken());
+                m_changedCFG = true;
+            }
+            break;
+        }
+            
+        default:
+            break;
+        }
+    }
+
+    // Find a node that:
+    //     - functor(node) returns true.
+    //     - it's reachable from the given node via children.
+    //     - it's in the last "bound" slots in the current basic block.
+    // This algorithm is optimized under the assumption that the bound is small.
+    template<typename Functor>
+    Value* findRecentNodeMatching(Value* start, unsigned bound, const Functor& functor)
+    {
+        unsigned startIndex = bound < m_index ? m_index - bound : 0;
+        Value* result = nullptr;
+        start->walk(
+            [&] (Value* value) -> Value::WalkStatus {
+                bool found = false;
+                for (unsigned i = startIndex; i <= m_index; ++i) {
+                    if (m_block->at(i) == value)
+                        found = true;
+                }
+                if (!found)
+                    return Value::IgnoreChildren;
+
+                if (functor(value)) {
+                    result = value;
+                    return Value::Stop;
+                }
+
+                return Value::Continue;
+            });
+        return result;
+    }
+
+    // This specializes a sequence of code up to a Select. This doesn't work when we're at a
+    // terminal. It would be cool to fix that eventually. The main problem is that instead of
+    // splitting the block, we should just insert the then/else blocks. We'll have to create
+    // double the Phis and double the Upsilons. It'll probably be the sort of optimization that
+    // we want to do only after we've done loop optimizations, since this will *definitely*
+    // obscure things. In fact, even this simpler form of select specialization will possibly
+    // obscure other optimizations. It would be great to have two modes of strength reduction,
+    // one that does obscuring optimizations and runs late, and another that does not do
+    // obscuring optimizations and runs early.
+    // FIXME: Make select specialization handle branches.
+    // FIXME: Have a form of strength reduction that does no obscuring optimizations and runs
+    // early.
+    void specializeSelect(Value* source)
+    {
+        if (verbose)
+            dataLog("Specializing select: ", deepDump(m_proc, source), "\n");
+
+        // This mutates startIndex to account for the fact that m_block got the front of it
+        // chopped off.
+        BasicBlock* predecessor =
+            m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet);
+
+        // Splitting will commit the insertion set, which changes the exact position of the
+        // source. That's why we do the search after splitting.
+        unsigned startIndex = UINT_MAX;
+        for (unsigned i = predecessor->size(); i--;) {
+            if (predecessor->at(i) == source) {
+                startIndex = i;
+                break;
+            }
+        }
+        
+        RELEASE_ASSERT(startIndex != UINT_MAX);
+
+        // By BasicBlock convention, caseIndex == 0 => then, caseIndex == 1 => else.
+        static const unsigned numCases = 2;
+        BasicBlock* cases[numCases];
+        for (unsigned i = 0; i < numCases; ++i)
+            cases[i] = m_blockInsertionSet.insertBefore(m_block);
+
+        HashMap<Value*, Value*> mappings[2];
+
+        // Save things we want to know about the source.
+        Value* predicate = source->child(0);
+
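+        // Seed each case's substitution map: in case i, uses of the Select are rewritten to use
+        // the corresponding selected child.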
+        for (unsigned i = 0; i < numCases; ++i)
+            mappings[i].add(source, source->child(1 + i));
+
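+        // Clones a value into both case blocks, remapping its operands through the per-case
+        // substitutions, and turns the original value into a Phi fed by Upsilons from each case.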
+        auto cloneValue = [&] (Value* value) {
+            ASSERT(value != source);
+
+            for (unsigned i = 0; i < numCases; ++i) {
+                Value* clone = m_proc.clone(value);
+                for (Value*& child : clone->children()) {
+                    if (Value* newChild = mappings[i].get(child))
+                        child = newChild;
+                }
+                if (value->type() != Void)
+                    mappings[i].add(value, clone);
+
+                cases[i]->append(clone);
+                if (value->type() != Void)
+                    cases[i]->appendNew<UpsilonValue>(m_proc, value->origin(), clone, value);
+            }
+
+            value->replaceWithPhi();
+        };
+
+        // The jump that the splitter inserted is of no use to us.
+        predecessor->removeLast(m_proc);
+
+        // Handle the source; it's special.
+        for (unsigned i = 0; i < numCases; ++i) {
+            cases[i]->appendNew<UpsilonValue>(
+                m_proc, source->origin(), source->child(1 + i), source);
+        }
+        source->replaceWithPhi();
+        m_insertionSet.insertValue(m_index, source);
+
+        // Now handle all values between the source and the check.
+        for (unsigned i = startIndex + 1; i < predecessor->size(); ++i) {
+            Value* value = predecessor->at(i);
+            value->owner = nullptr;
+
+            cloneValue(value);
+
+            if (value->type() != Void)
+                m_insertionSet.insertValue(m_index, value);
+            else
+                m_proc.deleteValue(value);
+        }
+
+        // Finally, deal with the check.
+        cloneValue(m_value);
+
+        // Remove the values from the predecessor.
+        predecessor->values().resize(startIndex);
+        
+        predecessor->appendNew<Value>(m_proc, Branch, source->origin(), predicate);
+        predecessor->setSuccessors(FrequentedBlock(cases[0]), FrequentedBlock(cases[1]));
+
+        for (unsigned i = 0; i < numCases; ++i) {
+            cases[i]->appendNew<Value>(m_proc, Jump, m_value->origin());
+            cases[i]->setSuccessors(FrequentedBlock(m_block));
+        }
+
+        m_changed = true;
+
+        predecessor->updatePredecessorsAfter();
+    }
+
+    // Turn this: Add(constant, value)
+    // Into this: Add(value, constant)
+    //
+    // Also:
+    // Turn this: Add(value1, value2)
+    // Into this: Add(value2, value1)
+    // If we decide that value2 coming first is the canonical ordering.
+    void handleCommutativity()
+    {
+        // Note that we have commutative operations that take more than two children. Those operations may
+        // commute their first two children while leaving the rest unaffected.
+        ASSERT(m_value->numChildren() >= 2);
+        
+        // Leave it alone if the right child is a constant.
+        if (m_value->child(1)->isConstant())
+            return;
+        
+        if (m_value->child(0)->isConstant()) {
+            std::swap(m_value->child(0), m_value->child(1));
+            m_changed = true;
+            return;
+        }
+
+        // Sort the operands. This is an important canonicalization. We use the index instead of
+        // the address to make this at least slightly deterministic.
+        if (m_value->child(0)->index() > m_value->child(1)->index()) {
+            std::swap(m_value->child(0), m_value->child(1));
+            m_changed = true;
+            return;
+        }
+    }
+
+    // FIXME: This should really be a forward analysis. Instead, we use a bounded-search backwards
+    // analysis.
+    IntRange rangeFor(Value* value, unsigned timeToLive = 5)
+    {
+        if (!timeToLive)
+            return IntRange::top(value->type());
+        
+        switch (value->opcode()) {
+        case Const32:
+        case Const64: {
+            int64_t intValue = value->asInt();
+            return IntRange(intValue, intValue);
+        }
+
+        case BitAnd:
+            if (value->child(1)->hasInt())
+                return IntRange::rangeForMask(value->child(1)->asInt(), value->type());
+            break;
+
+        case SShr:
+            if (value->child(1)->hasInt32()) {
+                return rangeFor(value->child(0), timeToLive - 1).sShr(
+                    value->child(1)->asInt32(), value->type());
+            }
+            break;
+
+        case ZShr:
+            if (value->child(1)->hasInt32()) {
+                return rangeFor(value->child(0), timeToLive - 1).zShr(
+                    value->child(1)->asInt32(), value->type());
+            }
+            break;
+
+        case Shl:
+            if (value->child(1)->hasInt32()) {
+                return rangeFor(value->child(0), timeToLive - 1).shl(
+                    value->child(1)->asInt32(), value->type());
+            }
+            break;
+
+        case Add:
+            return rangeFor(value->child(0), timeToLive - 1).add(
+                rangeFor(value->child(1), timeToLive - 1), value->type());
+
+        case Sub:
+            return rangeFor(value->child(0), timeToLive - 1).sub(
+                rangeFor(value->child(1), timeToLive - 1), value->type());
+
+        case Mul:
+            return rangeFor(value->child(0), timeToLive - 1).mul(
+                rangeFor(value->child(1), timeToLive - 1), value->type());
+
+        default:
+            break;
+        }
+
+        return IntRange::top(value->type());
+    }
+
+    template<typename ValueType, typename... Arguments>
+    void replaceWithNew(Arguments... arguments)
+    {
+        replaceWithNewValue(m_proc.add<ValueType>(arguments...));
+    }
+
+    bool replaceWithNewValue(Value* newValue)
+    {
+        if (!newValue)
+            return false;
+        m_insertionSet.insertValue(m_index, newValue);
+        m_value->replaceWithIdentity(newValue);
+        m_changed = true;
+        return true;
+    }
+
+    void replaceWithIdentity(Value* newValue)
+    {
+        m_value->replaceWithIdentity(newValue);
+        m_changed = true;
+    }
+
+    void handleShiftAmount()
+    {
+        // Shifting anything by zero is the identity.
+        if (m_value->child(1)->isInt32(0)) {
+            replaceWithIdentity(m_value->child(0));
+            return;
+        }
+
+        // The shift already masks its shift amount. If the shift amount is being masked by a
+        // redundant amount, then remove the mask. For example,
+        // Turn this: Shl(@x, BitAnd(@y, 63))
+        // Into this: Shl(@x, @y)
+        unsigned mask = sizeofType(m_value->type()) * 8 - 1;
+        if (m_value->child(1)->opcode() == BitAnd
+            && m_value->child(1)->child(1)->hasInt32()
+            && (m_value->child(1)->child(1)->asInt32() & mask) == mask) {
+            m_value->child(1) = m_value->child(1)->child(0);
+            m_changed = true;
+        }
+    }
+
+    void replaceIfRedundant()
+    {
+        m_changed |= m_pureCSE.process(m_value, *m_dominators);
+    }
+
+    void simplifyCFG()
+    {
+        if (verbose) {
+            dataLog("Before simplifyCFG:\n");
+            dataLog(m_proc);
+        }
+        
+        // We have three easy simplification rules:
+        //
+        // 1) If a successor is a block that just jumps to another block, then jump directly to
+        //    that block.
+        //
+        // 2) If all successors are the same and the operation has no effects, then use a jump
+        //    instead.
+        //
+        // 3) If you jump to a block that is not you and has one predecessor, then merge.
+        //
+        // Note that because of the first rule, this phase may introduce critical edges. That's fine.
+        // If you need broken critical edges, then you have to break them yourself.
+
+        // Note that this relies on predecessors being at least conservatively correct. It's fine for
+        // predecessors to mention a block that isn't actually a predecessor. It's *not* fine for a
+        // predecessor to be omitted. We assert as much in the loop. In practice, we precisely preserve
+        // predecessors during strength reduction since that minimizes the total number of fixpoint
+        // iterations needed to kill a lot of code.
+
+        for (BasicBlock* block : m_proc) {
+            if (verbose)
+                dataLog("Considering block ", *block, ":\n");
+
+            checkPredecessorValidity();
+
+            // We don't care about blocks that don't have successors.
+            if (!block->numSuccessors())
+                continue;
+
+            // First check if any of the successors of this block can be forwarded over.
+            for (BasicBlock*& successor : block->successorBlocks()) {
+                if (successor != block
+                    && successor->size() == 1
+                    && successor->last()->opcode() == Jump) {
+                    BasicBlock* newSuccessor = successor->successorBlock(0);
+                    if (newSuccessor != successor) {
+                        if (verbose) {
+                            dataLog(
+                                "Replacing ", pointerDump(block), "->", pointerDump(successor),
+                                " with ", pointerDump(block), "->", pointerDump(newSuccessor),
+                                "\n");
+                        }
+                        // Note that we do not do replacePredecessor() because the block we're
+                        // skipping will still have newSuccessor as its successor.
+                        newSuccessor->addPredecessor(block);
+                        successor = newSuccessor;
+                        m_changedCFG = true;
+                    }
+                }
+            }
+
+            // Now check if the block's terminal can be replaced with a jump.
+            if (block->numSuccessors() > 1) {
+                // The terminal must not have weird effects.
+                Effects effects = block->last()->effects();
+                effects.terminal = false;
+                if (!effects.mustExecute()) {
+                    // All of the successors must be the same.
+                    bool allSame = true;
+                    BasicBlock* firstSuccessor = block->successorBlock(0);
+                    for (unsigned i = 1; i < block->numSuccessors(); ++i) {
+                        if (block->successorBlock(i) != firstSuccessor) {
+                            allSame = false;
+                            break;
+                        }
+                    }
+                    if (allSame) {
+                        if (verbose) {
+                            dataLog(
+                                "Changing ", pointerDump(block), "'s terminal to a Jump.\n");
+                        }
+                        block->last()->replaceWithJump(block, FrequentedBlock(firstSuccessor));
+                        m_changedCFG = true;
+                    }
+                }
+            }
+
+            // Finally handle jumps to a block with one predecessor.
+            if (block->numSuccessors() == 1) {
+                BasicBlock* successor = block->successorBlock(0);
+                if (successor != block && successor->numPredecessors() == 1) {
+                    RELEASE_ASSERT(successor->predecessor(0) == block);
+                    
+                    // We can merge the two blocks, because the predecessor only jumps to the successor
+                    // and the successor is only reachable from the predecessor.
+                    
+                    // Remove the terminal.
+                    Value* value = block->values().takeLast();
+                    Origin jumpOrigin = value->origin();
+                    RELEASE_ASSERT(value->effects().terminal);
+                    m_proc.deleteValue(value);
+                    
+                    // Append the full contents of the successor to the predecessor.
+                    block->values().appendVector(successor->values());
+                    block->successors() = successor->successors();
+                    
+                    // Make sure that the successor has nothing left in it. Make sure that the block
+                    // has a terminal so that nobody chokes when they look at it.
+                    successor->values().resize(0);
+                    successor->appendNew<Value>(m_proc, Oops, jumpOrigin);
+                    successor->clearSuccessors();
+                    
+                    // Ensure that predecessors of block's new successors know what's up.
+                    for (BasicBlock* newSuccessor : block->successorBlocks())
+                        newSuccessor->replacePredecessor(successor, block);
+
+                    if (verbose) {
+                        dataLog(
+                            "Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
+                    }
+                    
+                    m_changedCFG = true;
+                }
+            }
+        }
+
+        if (m_changedCFG && verbose) {
+            dataLog("B3 after simplifyCFG:\n");
+            dataLog(m_proc);
+        }
+    }
+
+    void checkPredecessorValidity()
+    {
+        if (!shouldValidateIRAtEachPhase())
+            return;
+
+        for (BasicBlock* block : m_proc) {
+            for (BasicBlock* successor : block->successorBlocks())
+                RELEASE_ASSERT(successor->containsPredecessor(block));
+        }
+    }
+
+    void killDeadCode()
+    {
+        GraphNodeWorklist<Value*, IndexSet<Value>> worklist;
+        Vector<UpsilonValue*, 64> upsilons;
+        for (BasicBlock* block : m_proc) {
+            for (Value* value : *block) {
+                Effects effects;
+                // We don't care about effects of SSA operations, since we model them more
+                // accurately than the effects() method does.
+                if (value->opcode() != Phi && value->opcode() != Upsilon)
+                    effects = value->effects();
+                
+                if (effects.mustExecute())
+                    worklist.push(value);
+                
+                if (UpsilonValue* upsilon = value->as<UpsilonValue>())
+                    upsilons.append(upsilon);
+            }
+        }
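+        // Propagate liveness to fixpoint: anything used by a live value is live, and an Upsilon
+        // becomes live only once its Phi has been found live.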
+        for (;;) {
+            while (Value* value = worklist.pop()) {
+                for (Value* child : value->children())
+                    worklist.push(child);
+            }
+            
+            bool didPush = false;
+            for (size_t upsilonIndex = 0; upsilonIndex < upsilons.size(); ++upsilonIndex) {
+                UpsilonValue* upsilon = upsilons[upsilonIndex];
+                if (worklist.saw(upsilon->phi())) {
+                    worklist.push(upsilon);
+                    upsilons[upsilonIndex--] = upsilons.last();
+                    upsilons.takeLast();
+                    didPush = true;
+                }
+            }
+            if (!didPush)
+                break;
+        }
+
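+        // Sweep: keep only the values that the worklist saw, and remember which Variables are
+        // still referenced so that unreferenced ones can be deleted below.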
+        IndexSet<Variable> liveVariables;
+        
+        for (BasicBlock* block : m_proc) {
+            size_t sourceIndex = 0;
+            size_t targetIndex = 0;
+            while (sourceIndex < block->size()) {
+                Value* value = block->at(sourceIndex++);
+                if (worklist.saw(value)) {
+                    if (VariableValue* variableValue = value->as<VariableValue>())
+                        liveVariables.add(variableValue->variable());
+                    block->at(targetIndex++) = value;
+                } else {
+                    m_proc.deleteValue(value);
+                    m_changed = true;
+                }
+            }
+            block->values().resize(targetIndex);
+        }
+
+        for (Variable* variable : m_proc.variables()) {
+            if (!liveVariables.contains(variable))
+                m_proc.deleteVariable(variable);
+        }
+    }
+
+    void simplifySSA()
+    {
+        // This runs Aycock and Horspool's algorithm on our Phi functions [1]. For most CFG patterns,
+        // this can take a suboptimal arrangement of Phi functions and make it optimal, as if you had
+        // run Cytron, Ferrante, Rosen, Wegman, and Zadeck. It's only suboptimal for irreducible
+        // CFGs. In practice, that doesn't matter, since we expect clients of B3 to run their own SSA
+        // conversion before lowering to B3, and in the case of the DFG, that conversion uses Cytron
+        // et al. In that context, this algorithm is intended to simplify Phi functions that were
+        // made redundant by prior CFG simplification. But according to Aycock and Horspool's paper,
+        // this algorithm is good enough that a B3 client could just give us maximal Phi's (i.e. Phi
+        // for each variable at each basic block) and we will make them optimal.
+        // [1] http://pages.cpsc.ucalgary.ca/~aycock/papers/ssa.ps
+
+        // Aycock and Horspool prescribe two rules that are to be run to fixpoint:
+        //
+        // 1) If all of the Phi's children are the same (i.e. it's one child referenced from one or
+        //    more Upsilons), then replace all uses of the Phi with the one child.
+        //
+        // 2) If all of the Phi's children are either the Phi itself or exactly one other child, then
+        //    replace all uses of the Phi with the one other child.
+        //
+        // Rule (2) subsumes rule (1), so we can just run (2). We only run one fixpoint iteration
+        // here. The premise is that in common cases, this will only find optimization opportunities
+        // as a result of CFG simplification and usually CFG simplification will only do one round
+        // of block merging per ReduceStrength fixpoint iteration, so it's OK for this to only do one
+        // round of Phi merging - since Phis are the value analogue of blocks.
+
+        PhiChildren phiChildren(m_proc);
+
+        for (Value* phi : phiChildren.phis()) {
+            Value* otherChild = nullptr;
+            bool ok = true;
+            for (Value* child : phiChildren[phi].values()) {
+                if (child == phi)
+                    continue;
+                if (child == otherChild)
+                    continue;
+                if (!otherChild) {
+                    otherChild = child;
+                    continue;
+                }
+                ok = false;
+                break;
+            }
+            if (!ok)
+                continue;
+            if (!otherChild) {
+                // Wow, this would be super weird. It probably won't happen, except that things could
+                // get weird as a consequence of stepwise simplifications in the strength reduction
+                // fixpoint.
+                continue;
+            }
+            
+            // Turn the Phi into an Identity and turn the Upsilons into Nops.
+            m_changed = true;
+            for (Value* upsilon : phiChildren[phi])
+                upsilon->replaceWithNop();
+            phi->replaceWithIdentity(otherChild);
+        }
+    }
+
+    Procedure& m_proc;
+    InsertionSet m_insertionSet;
+    BlockInsertionSet m_blockInsertionSet;
+    BasicBlock* m_block { nullptr };
+    unsigned m_index { 0 };
+    Value* m_value { nullptr };
+    Dominators* m_dominators { nullptr };
+    PureCSE m_pureCSE;
+    bool m_changed { false };
+    bool m_changedCFG { false };
+};
+
+} // anonymous namespace
+
+bool reduceStrength(Procedure& proc)
+{
+    PhaseScope phaseScope(proc, "reduceStrength");
+    ReduceStrength reduceStrength(proc);
+    return reduceStrength.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3ReduceStrength.h b/b3/B3ReduceStrength.h
new file mode 100644
index 0000000..1abb80f
--- /dev/null
+++ b/b3/B3ReduceStrength.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+// Does strength reduction, constant folding, canonicalization, CFG simplification, DCE, and very
+// simple CSE. This phase runs those optimizations to fixpoint. The goal of the phase is to
+// dramatically reduce the complexity of the code. In the future, it's preferable to add optimizations
+// to this phase rather than creating new phases, because then the optimizations can participate
+// in the fixpoint. However, because of the many interlocking optimizations, it can be difficult to
+// add sophisticated optimizations to it. For that reason we have full CSE in a different phase, for
+// example.
+
+JS_EXPORT_PRIVATE bool reduceStrength(Procedure&);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SSACalculator.cpp b/b3/B3SSACalculator.cpp
new file mode 100644
index 0000000..30692a9
--- /dev/null
+++ b/b3/B3SSACalculator.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SSACalculator.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockInlines.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+void SSACalculator::Variable::dump(PrintStream& out) const
+{
+    out.print("var", m_index);
+}
+
+void SSACalculator::Variable::dumpVerbose(PrintStream& out) const
+{
+    dump(out);
+    if (!m_blocksWithDefs.isEmpty()) {
+        out.print("(defs: ");
+        CommaPrinter comma;
+        for (BasicBlock* block : m_blocksWithDefs)
+            out.print(comma, *block);
+        out.print(")");
+    }
+}
+
+void SSACalculator::Def::dump(PrintStream& out) const
+{
+    out.print("def(", *m_variable, ", ", *m_block, ", ", pointerDump(m_value), ")");
+}
+
+SSACalculator::SSACalculator(Procedure& proc)
+    : m_data(proc.size())
+    , m_proc(proc)
+{
+}
+
+SSACalculator::~SSACalculator()
+{
+}
+
+void SSACalculator::reset()
+{
+    m_variables.clear();
+    m_defs.clear();
+    m_phis.clear();
+    for (unsigned blockIndex = m_data.size(); blockIndex--;) {
+        m_data[blockIndex].m_defs.clear();
+        m_data[blockIndex].m_phis.clear();
+    }
+}
+
+SSACalculator::Variable* SSACalculator::newVariable()
+{
+    return &m_variables.alloc(Variable(m_variables.size()));
+}
+
+SSACalculator::Def* SSACalculator::newDef(Variable* variable, BasicBlock* block, Value* value)
+{
+    Def* def = m_defs.add(Def(variable, block, value));
+    auto result = m_data[block].m_defs.add(variable, def);
+    if (result.isNewEntry)
+        variable->m_blocksWithDefs.append(block);
+    else
+        result.iterator->value = def;
+    return def;
+}
+
+SSACalculator::Def* SSACalculator::nonLocalReachingDef(BasicBlock* block, Variable* variable)
+{
+    return reachingDefAtTail(m_dominators->idom(block), variable);
+}
+
+SSACalculator::Def* SSACalculator::reachingDefAtTail(BasicBlock* block, Variable* variable)
+{
+    for (; block; block = m_dominators->idom(block)) {
+        if (Def* def = m_data[block].m_defs.get(variable))
+            return def;
+    }
+    return nullptr;
+}
+
+void SSACalculator::dump(PrintStream& out) const
+{
+    out.print("(this)->m_defs)
+        out.print(comma, *def);
+    out.print("], Phis: [");
+    comma = CommaPrinter();
+    for (Def* def : const_cast<SSACalculator*>(this)->m_phis)
+        out.print(comma, *def);
+    out.print("], Block data: [");
+    comma = CommaPrinter();
+    for (unsigned blockIndex = 0; blockIndex < m_proc.size(); ++blockIndex) {
+        BasicBlock* block = m_proc[blockIndex];
+        if (!block)
+            continue;
+        
+        out.print(comma, *block, "=>(");
+        out.print("Defs: {");
+        CommaPrinter innerComma;
+        for (auto entry : m_data[block].m_defs)
+            out.print(innerComma, *entry.key, "->", *entry.value);
+        out.print("}, Phis: {");
+        innerComma = CommaPrinter();
+        for (Def* def : m_data[block].m_phis)
+            out.print(innerComma, *def);
+        out.print("})");
+    }
+    out.print("]>");
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3SSACalculator.h b/b3/B3SSACalculator.h
new file mode 100644
index 0000000..be9a064
--- /dev/null
+++ b/b3/B3SSACalculator.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Dominators.h"
+#include "B3ProcedureInlines.h"
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+// SSACalculator provides a reusable tool for building SSA's. It's modeled after
+// DFG::SSACalculator.
+
+class SSACalculator {
+public:
+    SSACalculator(Procedure&);
+    ~SSACalculator();
+
+    void reset();
+
+    class Variable {
+    public:
+        unsigned index() const { return m_index; }
+        
+        void dump(PrintStream&) const;
+        void dumpVerbose(PrintStream&) const;
+        
+    private:
+        friend class SSACalculator;
+        
+        Variable()
+            : m_index(UINT_MAX)
+        {
+        }
+        
+        Variable(unsigned index)
+            : m_index(index)
+        {
+        }
+
+        Vector<BasicBlock*, 4> m_blocksWithDefs;
+        unsigned m_index;
+    };
+
+    class Def {
+    public:
+        Variable* variable() const { return m_variable; }
+        BasicBlock* block() const { return m_block; }
+        
+        Value* value() const { return m_value; }
+        
+        void dump(PrintStream&) const;
+        
+    private:
+        friend class SSACalculator;
+        
+        Def()
+            : m_variable(nullptr)
+            , m_block(nullptr)
+            , m_value(nullptr)
+        {
+        }
+        
+        Def(Variable* variable, BasicBlock* block, Value* value)
+            : m_variable(variable)
+            , m_block(block)
+            , m_value(value)
+        {
+        }
+        
+        Variable* m_variable;
+        BasicBlock* m_block;
+        Value* m_value;
+    };
+
+    Variable* newVariable();
+    Def* newDef(Variable*, BasicBlock*, Value*);
+
+    Variable* variable(unsigned index) { return &m_variables[index]; }
+
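+    // Decides where Phis are needed using the pruned iterated dominance frontier of each
+    // variable's defining blocks. The functor is called as functor(variable, block); it should
+    // create and return the Phi value to place at the head of that block, or return null to
+    // decline (for example, if the variable is known not to be live there).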
+    template<typename Functor>
+    void computePhis(const Functor& functor)
+    {
+        m_dominators = &m_proc.dominators();
+        for (Variable& variable : m_variables) {
+            m_dominators->forAllBlocksInPrunedIteratedDominanceFrontierOf(
+                variable.m_blocksWithDefs,
+                [&] (BasicBlock* block) -> bool {
+                    Value* phi = functor(&variable, block);
+                    if (!phi)
+                        return false;
+
+                    BlockData& data = m_data[block];
+                    Def* phiDef = m_phis.add(Def(&variable, block, phi));
+                    data.m_phis.append(phiDef);
+
+                    data.m_defs.add(&variable, phiDef);
+                    return true;
+                });
+        }
+    }
+
+    const Vector<Def*>& phisForBlock(BasicBlock* block)
+    {
+        return m_data[block].m_phis;
+    }
+    
+    // Ignores defs within the given block; it assumes that you've taken care of those
+    // yourself.
+    Def* nonLocalReachingDef(BasicBlock*, Variable*);
+    Def* reachingDefAtHead(BasicBlock* block, Variable* variable)
+    {
+        return nonLocalReachingDef(block, variable);
+    }
+    
+    // Considers the def within the given block, but only works at the tail of the block.
+    Def* reachingDefAtTail(BasicBlock*, Variable*);
+    
+    void dump(PrintStream&) const;
+    
+private:
+    SegmentedVector<Variable> m_variables;
+    Bag<Def> m_defs;
+    
+    Bag<Def> m_phis;
+    
+    struct BlockData {
+        HashMap<Variable*, Def*> m_defs;
+        Vector<Def*> m_phis;
+    };
+    
+    IndexMap<BasicBlock, BlockData> m_data;
+
+    Dominators* m_dominators { nullptr };
+    Procedure& m_proc;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SlotBaseValue.cpp b/b3/B3SlotBaseValue.cpp
new file mode 100644
index 0000000..b5fd69b
--- /dev/null
+++ b/b3/B3SlotBaseValue.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SlotBaseValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackSlot.h"
+
+namespace JSC { namespace B3 {
+
+SlotBaseValue::~SlotBaseValue()
+{
+}
+
+void SlotBaseValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, pointerDump(m_slot));
+}
+
+Value* SlotBaseValue::cloneImpl() const
+{
+    return new SlotBaseValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SlotBaseValue.h b/b3/B3SlotBaseValue.h
new file mode 100644
index 0000000..19392ea
--- /dev/null
+++ b/b3/B3SlotBaseValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class StackSlot;
+
+class JS_EXPORT_PRIVATE SlotBaseValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == SlotBase; }
+
+    ~SlotBaseValue();
+
+    StackSlot* slot() const { return m_slot; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    SlotBaseValue(Origin origin, StackSlot* slot)
+        : Value(CheckedOpcode, SlotBase, pointerType(), origin)
+        , m_slot(slot)
+    {
+    }
+
+    StackSlot* m_slot;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SparseCollection.h b/b3/B3SparseCollection.h
new file mode 100644
index 0000000..46c33a9
--- /dev/null
+++ b/b3/B3SparseCollection.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+// B3::Procedure and Air::Code have a lot of collections of indexed things. This class has all of
+// the shared logic: it owns the objects, hands out dense indices (reusing indices freed by
+// remove()), and provides iteration that skips over removed entries.
+
+template<typename T>
+class SparseCollection {
+    typedef Vector<std::unique_ptr<T>> VectorType;
+    
+public:
+    SparseCollection()
+    {
+    }
+
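+    // Takes ownership of the value, assigns it a dense index (reusing an index freed by a prior
+    // remove() when possible), and returns the raw pointer. T is expected to expose an m_index
+    // member that this collection manages.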
+    T* add(std::unique_ptr<T> value)
+    {
+        T* result = value.get();
+        
+        size_t index;
+        if (m_indexFreeList.isEmpty()) {
+            index = m_vector.size();
+            m_vector.append(nullptr);
+        } else
+            index = m_indexFreeList.takeLast();
+
+        value->m_index = index;
+        ASSERT(!m_vector[index]);
+        new (NotNull, &m_vector[index]) std::unique_ptr<T>(WTFMove(value));
+
+        return result;
+    }
+
+    template<typename... Arguments>
+    T* addNew(Arguments&&... arguments)
+    {
+        return add(std::unique_ptr<T>(new T(std::forward<Arguments>(arguments)...)));
+    }
+
+    void remove(T* value)
+    {
+        RELEASE_ASSERT(m_vector[value->m_index].get() == value);
+        m_indexFreeList.append(value->m_index);
+        m_vector[value->m_index] = nullptr;
+    }
+
+    unsigned size() const { return m_vector.size(); }
+    bool isEmpty() const { return m_vector.isEmpty(); }
+    
+    T* at(unsigned index) const { return m_vector[index].get(); }
+    T* operator[](unsigned index) const { return at(index); }
+
+    class iterator {
+    public:
+        iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(const SparseCollection& collection, unsigned index)
+            : m_collection(&collection)
+            , m_index(findNext(index))
+        {
+        }
+
+        T* operator*()
+        {
+            return m_collection->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index = findNext(m_index + 1);
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        unsigned findNext(unsigned index)
+        {
+            while (index < m_collection->size() && !m_collection->at(index))
+                index++;
+            return index;
+        }
+
+        const SparseCollection* m_collection;
+        unsigned m_index;
+    };
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+
+private:
+    Vector<std::unique_ptr<T>, 0, UnsafeVectorOverflow> m_vector;
+    Vector<size_t> m_indexFreeList;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3StackSlot.cpp b/b3/B3StackSlot.cpp
new file mode 100644
index 0000000..4e22014
--- /dev/null
+++ b/b3/B3StackSlot.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackSlot.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+StackSlot::~StackSlot()
+{
+}
+
+void StackSlot::dump(PrintStream& out) const
+{
+    out.print("stack", m_index);
+}
+
+void StackSlot::deepDump(PrintStream& out) const
+{
+    out.print("byteSize = ", m_byteSize, ", offsetFromFP = ", m_offsetFromFP);
+}
+
+StackSlot::StackSlot(unsigned byteSize)
+    : m_byteSize(byteSize)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3StackSlot.h b/b3/B3StackSlot.h
new file mode 100644
index 0000000..4a47509
--- /dev/null
+++ b/b3/B3StackSlot.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SparseCollection.h"
+#include <limits.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+namespace Air {
+class StackSlot;
+} // namespace Air
+
+class StackSlot {
+    WTF_MAKE_NONCOPYABLE(StackSlot);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    ~StackSlot();
+
+    unsigned byteSize() const { return m_byteSize; }
+    unsigned index() const { return m_index; }
+
+    // This gets assigned at the end of compilation. But, you can totally pin stack slots. Use the
+    // set method to do that.
+    intptr_t offsetFromFP() const { return m_offsetFromFP; }
+
+    // Note that this is meaningless unless the stack slot is Locked.
+    void setOffsetFromFP(intptr_t value)
+    {
+        m_offsetFromFP = value;
+    }
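+
+    // For example (sketch), pinning a slot at a fixed offset from the frame pointer; the offset
+    // value here is purely illustrative:
+    //
+    //     slot->setOffsetFromFP(-16);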
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+private:
+    friend class Air::StackSlot;
+    friend class Procedure;
+    friend class SparseCollection<StackSlot>;
+
+    StackSlot(unsigned byteSize);
+
+    unsigned m_index { UINT_MAX };
+    unsigned m_byteSize { 0 };
+    intptr_t m_offsetFromFP { 0 };
+};
+
+class DeepStackSlotDump {
+public:
+    DeepStackSlotDump(const StackSlot* slot)
+        : m_slot(slot)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_slot)
+            m_slot->deepDump(out);
+        else
+            out.print("<null>");
+    }
+
+private:
+    const StackSlot* m_slot;
+};
+
+inline DeepStackSlotDump deepDump(const StackSlot* slot)
+{
+    return DeepStackSlotDump(slot);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3StackmapGenerationParams.cpp b/b3/B3StackmapGenerationParams.cpp
new file mode 100644
index 0000000..0a07e4e
--- /dev/null
+++ b/b3/B3StackmapGenerationParams.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackmapGenerationParams.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "B3StackmapValue.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+const RegisterSet& StackmapGenerationParams::usedRegisters() const
+{
+    return m_value->m_usedRegisters;
+}
+
+RegisterSet StackmapGenerationParams::unavailableRegisters() const
+{
+    RegisterSet result = usedRegisters();
+    
+    RegisterSet unsavedCalleeSaves = RegisterSet::vmCalleeSaveRegisters();
+    for (const RegisterAtOffset& regAtOffset : m_context.code->calleeSaveRegisters())
+        unsavedCalleeSaves.clear(regAtOffset.reg());
+
+    result.merge(unsavedCalleeSaves);
+
+    for (GPRReg gpr : m_gpScratch)
+        result.clear(gpr);
+    for (FPRReg fpr : m_fpScratch)
+        result.clear(fpr);
+    
+    return result;
+}
+
+Vector<Box<CCallHelpers::Label>> StackmapGenerationParams::successorLabels() const
+{
+    RELEASE_ASSERT(m_context.indexInBlock == m_context.currentBlock->size() - 1);
+    RELEASE_ASSERT(m_value->effects().terminal);
+    
+    Vector<Box<CCallHelpers::Label>> result(m_context.currentBlock->numSuccessors());
+    for (unsigned i = m_context.currentBlock->numSuccessors(); i--;)
+        result[i] = m_context.blockLabels[m_context.currentBlock->successorBlock(i)];
+    return result;
+}
+
+bool StackmapGenerationParams::fallsThroughToSuccessor(unsigned successorIndex) const
+{
+    RELEASE_ASSERT(m_context.indexInBlock == m_context.currentBlock->size() - 1);
+    RELEASE_ASSERT(m_value->effects().terminal);
+    
+    Air::BasicBlock* successor = m_context.currentBlock->successorBlock(successorIndex);
+    Air::BasicBlock* nextBlock = m_context.code->findNextBlock(m_context.currentBlock);
+    return successor == nextBlock;
+}
+
+Procedure& StackmapGenerationParams::proc() const
+{
+    return m_context.code->proc();
+}
+
+StackmapGenerationParams::StackmapGenerationParams(
+    StackmapValue* value, const Vector<ValueRep>& reps, Air::GenerationContext& context)
+    : m_value(value)
+    , m_reps(reps)
+    , m_context(context)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3StackmapGenerationParams.h b/b3/B3StackmapGenerationParams.h
new file mode 100644
index 0000000..31d19ed
--- /dev/null
+++ b/b3/B3StackmapGenerationParams.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirGenerationContext.h"
+#include "B3ValueRep.h"
+#include "CCallHelpers.h"
+#include "RegisterSet.h"
+#include <wtf/Box.h>
+
+namespace JSC { namespace B3 {
+
+class CheckSpecial;
+class PatchpointSpecial;
+class Procedure;
+class StackmapValue;
+
+// NOTE: It's possible to capture StackmapGenerationParams by value, but not all of the methods will
+// work if you do that.
+class StackmapGenerationParams {
+public:
+    // This is the stackmap value that we're generating.
+    StackmapValue* value() const { return m_value; }
+    
+    // This tells you the actual value representations that were chosen. This is usually different
+    // from the constraints we supplied.
+    const Vector<ValueRep>& reps() const { return m_reps; }
+
+    // Usually we wish to access the reps. We make this easy by making ourselves appear to be a
+    // collection of reps.
+    unsigned size() const { return m_reps.size(); }
+    const ValueRep& at(unsigned index) const { return m_reps[index]; }
+    const ValueRep& operator[](unsigned index) const { return at(index); }
+    Vector<ValueRep>::const_iterator begin() const { return m_reps.begin(); }
+    Vector<ValueRep>::const_iterator end() const { return m_reps.end(); }
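+
+    // For example (sketch), a generator can walk the reps it was handed directly; the lambda shape
+    // matches StackmapGeneratorFunction and "patchpoint" is a hypothetical StackmapValue:
+    //
+    //     patchpoint->setGenerator([] (CCallHelpers&, const StackmapGenerationParams& params) {
+    //         for (unsigned i = 0; i < params.size(); ++i)
+    //             dataLog("rep #", i, " = ", params[i], "\n");
+    //     });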
+    
+    // This tells you the registers that were used.
+    const RegisterSet& usedRegisters() const;
+
+    // This is a useful helper if you want to do register allocation inside of a patchpoint. The
+    // usedRegisters() set is not directly useful for this purpose because:
+    //
+    // - You can only use callee-save registers for scratch if they were saved in the prologue. So,
+    //   if a register is callee-save, it's not enough that it's not in usedRegisters().
+    //
+    // - Scratch registers are going to be in usedRegisters() at the patchpoint. So, if you want to
+    //   find one of your requested scratch registers using usedRegisters(), you'll have a bad time.
+    //
+    // This gives you the used register set that's useful for allocating scratch registers. This set
+    // is defined as:
+    //
+    //     (usedRegisters() | (RegisterSet::calleeSaveRegisters() - proc.calleeSaveRegisters()))
+    //     - gpScratchRegisters - fpScratchRegisters
+    //
+    // I.e. it is like usedRegisters() but also includes unsaved callee-saves and excludes scratch
+    // registers.
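+    //
+    // A sketch of how a generator might pick a scratch GPR with this set (illustrative only, not
+    // part of this API):
+    //
+    //     RegisterSet unavailable = params.unavailableRegisters();
+    //     GPRReg scratch = InvalidGPRReg;
+    //     for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+    //         GPRReg candidate = GPRInfo::toRegister(i);
+    //         if (!unavailable.get(candidate)) {
+    //             scratch = candidate;
+    //             break;
+    //         }
+    //     }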
+    JS_EXPORT_PRIVATE RegisterSet unavailableRegisters() const;
+
+    GPRReg gpScratch(unsigned index) const { return m_gpScratch[index]; }
+    FPRReg fpScratch(unsigned index) const { return m_fpScratch[index]; }
+    
+    // This is computed lazily, so it won't work if you capture StackmapGenerationParams by value.
+    // These labels will get populated before any late paths or link tasks execute.
+    JS_EXPORT_PRIVATE Vector<Box<CCallHelpers::Label>> successorLabels() const;
+    
+    // This is computed lazily, so it won't work if you capture StackmapGenerationParams by value.
+    // Returns true if the successor at the given index is going to be emitted right after the
+    // patchpoint.
+    JS_EXPORT_PRIVATE bool fallsThroughToSuccessor(unsigned successorIndex) const;
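+
+    // A sketch of how a terminal patchpoint's generator might use these (illustrative only; the
+    // labels are only populated later, so the links are deferred to a late path, and conditionGPR
+    // is a hypothetical register holding the branch condition):
+    //
+    //     auto labels = params.successorLabels();
+    //     bool fallsThrough = params.fallsThroughToSuccessor(0);
+    //     CCallHelpers::Jump takenJump = jit.branchTest32(CCallHelpers::NonZero, conditionGPR);
+    //     params.addLatePath([=] (CCallHelpers& jit) {
+    //         takenJump.linkTo(*labels[1], &jit);
+    //         if (!fallsThrough)
+    //             jit.jump().linkTo(*labels[0], &jit);
+    //     });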
+
+    // This is provided for convenience; it means that you don't have to capture it if you don't want to.
+    JS_EXPORT_PRIVATE Procedure& proc() const;
+    
+    // The Air::GenerationContext gives you even more power.
+    Air::GenerationContext& context() const { return m_context; };
+
+    template<typename Functor>
+    void addLatePath(const Functor& functor) const
+    {
+        context().latePaths.append(
+            createSharedTask<void(CCallHelpers&, Air::GenerationContext&)>(
+                [=] (CCallHelpers& jit, Air::GenerationContext&) {
+                    functor(jit);
+                }));
+    }
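+
+    // A minimal sketch of the common slow-path pattern (illustrative only; someConditionGPR is a
+    // hypothetical register):
+    //
+    //     CCallHelpers::Jump slowCase = jit.branchTest32(CCallHelpers::Zero, someConditionGPR);
+    //     params.addLatePath([=] (CCallHelpers& jit) {
+    //         slowCase.link(&jit);
+    //         // ... emit rarely-taken code here, then jump back if needed ...
+    //     });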
+
+private:
+    friend class CheckSpecial;
+    friend class PatchpointSpecial;
+    
+    StackmapGenerationParams(StackmapValue*, const Vector<ValueRep>& reps, Air::GenerationContext&);
+
+    StackmapValue* m_value;
+    Vector<ValueRep> m_reps;
+    Vector<GPRReg> m_gpScratch;
+    Vector<FPRReg> m_fpScratch;
+    Air::GenerationContext& m_context;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3StackmapSpecial.cpp b/b3/B3StackmapSpecial.cpp
new file mode 100644
index 0000000..b5aa6c3
--- /dev/null
+++ b/b3/B3StackmapSpecial.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackmapSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 {
+
+using namespace Air;
+
+StackmapSpecial::StackmapSpecial()
+{
+}
+
+StackmapSpecial::~StackmapSpecial()
+{
+}
+
+void StackmapSpecial::reportUsedRegisters(Inst& inst, const RegisterSet& usedRegisters)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    // FIXME: If the Inst that uses the StackmapSpecial gets duplicated, then we end up merging used
+    // register sets from multiple places. This currently won't happen since Air doesn't have taildup
+    // or things like that. But maybe eventually it could be a problem.
+    value->m_usedRegisters.merge(usedRegisters);
+}
+
+RegisterSet StackmapSpecial::extraClobberedRegs(Inst& inst)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    return value->lateClobbered();
+}
+
+RegisterSet StackmapSpecial::extraEarlyClobberedRegs(Inst& inst)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    return value->earlyClobbered();
+}
+
+void StackmapSpecial::forEachArgImpl(
+    unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+    Inst& inst, RoleMode roleMode, std::optional<unsigned> firstRecoverableIndex,
+    const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    // Check that insane things have not happened.
+    ASSERT(inst.args.size() >= numIgnoredAirArgs);
+    ASSERT(value->children().size() >= numIgnoredB3Args);
+    ASSERT(inst.args.size() - numIgnoredAirArgs >= value->children().size() - numIgnoredB3Args);
+    
+    for (unsigned i = 0; i < value->children().size() - numIgnoredB3Args; ++i) {
+        Arg& arg = inst.args[i + numIgnoredAirArgs];
+        ConstrainedValue child = value->constrainedChild(i + numIgnoredB3Args);
+
+        Arg::Role role;
+        switch (roleMode) {
+        case ForceLateUseUnlessRecoverable:
+            ASSERT(firstRecoverableIndex);
+            if (arg != inst.args[*firstRecoverableIndex] && arg != inst.args[*firstRecoverableIndex + 1]) {
+                role = Arg::LateColdUse;
+                break;
+            }
+            FALLTHROUGH;
+        case SameAsRep:
+            switch (child.rep().kind()) {
+            case ValueRep::WarmAny:
+            case ValueRep::SomeRegister:
+            case ValueRep::Register:
+            case ValueRep::Stack:
+            case ValueRep::StackArgument:
+            case ValueRep::Constant:
+                role = Arg::Use;
+                break;
+            case ValueRep::LateRegister:
+                role = Arg::LateUse;
+                break;
+            case ValueRep::ColdAny:
+                role = Arg::ColdUse;
+                break;
+            case ValueRep::LateColdAny:
+                role = Arg::LateColdUse;
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+            break;
+        case ForceLateUse:
+            role = Arg::LateColdUse;
+            break;
+        }
+
+        Type type = child.value()->type();
+        callback(arg, role, Arg::typeForB3Type(type), Arg::widthForB3Type(type));
+    }
+}
+
+bool StackmapSpecial::isValidImpl(
+    unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+    Inst& inst)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    // Check that insane things have not happened.
+    ASSERT(inst.args.size() >= numIgnoredAirArgs);
+    ASSERT(value->children().size() >= numIgnoredB3Args);
+
+    // For the Inst to be valid, it needs to have the right number of arguments.
+    if (inst.args.size() - numIgnoredAirArgs < value->children().size() - numIgnoredB3Args)
+        return false;
+
+    // Regardless of constraints, stackmaps have some basic requirements for their arguments. For
+    // example, you can't have a non-FP-offset address. This verifies those conditions as well as the
+    // argument types.
+    for (unsigned i = 0; i < value->children().size() - numIgnoredB3Args; ++i) {
+        Value* child = value->child(i + numIgnoredB3Args);
+        Arg& arg = inst.args[i + numIgnoredAirArgs];
+
+        if (!isArgValidForValue(arg, child))
+            return false;
+    }
+
+    // The number of constraints has to be no greater than the number of B3 children.
+    ASSERT(value->m_reps.size() <= value->children().size());
+
+    // Verify any explicitly supplied constraints.
+    for (unsigned i = numIgnoredB3Args; i < value->m_reps.size(); ++i) {
+        ValueRep& rep = value->m_reps[i];
+        Arg& arg = inst.args[i - numIgnoredB3Args + numIgnoredAirArgs];
+
+        if (!isArgValidForRep(code(), arg, rep))
+            return false;
+    }
+
+    return true;
+}
+
+bool StackmapSpecial::admitsStackImpl(
+    unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+    Inst& inst, unsigned argIndex)
+{
+    StackmapValue* value = inst.origin->as<StackmapValue>();
+    ASSERT(value);
+
+    unsigned stackmapArgIndex = argIndex - numIgnoredAirArgs + numIgnoredB3Args;
+
+    if (stackmapArgIndex >= value->numChildren()) {
+        // It's not a stackmap argument, so as far as we are concerned, it doesn't admit stack.
+        return false;
+    }
+
+    if (stackmapArgIndex >= value->m_reps.size()) {
+        // This means that there was no constraint.
+        return true;
+    }
+    
+    // We only admit stack for Any's, since Stack is not a valid input constraint, and StackArgument
+    // translates to a CallArg in Air.
+    if (value->m_reps[stackmapArgIndex].isAny())
+        return true;
+
+    return false;
+}
+
+Vector<ValueRep> StackmapSpecial::repsImpl(
+    GenerationContext& context, unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, Inst& inst)
+{
+    Vector<ValueRep> result;
+    for (unsigned i = 0; i < inst.origin->numChildren() - numIgnoredB3Args; ++i)
+        result.append(repForArg(*context.code, inst.args[i + numIgnoredAirArgs]));
+    return result;
+}
+
+bool StackmapSpecial::isArgValidForValue(const Air::Arg& arg, Value* value)
+{
+    switch (arg.kind()) {
+    case Arg::Tmp:
+    case Arg::Imm:
+    case Arg::BigImm:
+        break;
+    default:
+        if (!arg.isStackMemory())
+            return false;
+        break;
+    }
+
+    return arg.canRepresent(value);
+}
+
+bool StackmapSpecial::isArgValidForRep(Air::Code& code, const Air::Arg& arg, const ValueRep& rep)
+{
+    switch (rep.kind()) {
+    case ValueRep::WarmAny:
+    case ValueRep::ColdAny:
+    case ValueRep::LateColdAny:
+        // We already verified by isArgValidForValue().
+        return true;
+    case ValueRep::SomeRegister:
+    case ValueRep::SomeEarlyRegister:
+        return arg.isTmp();
+    case ValueRep::LateRegister:
+    case ValueRep::Register:
+        return arg == Tmp(rep.reg());
+    case ValueRep::StackArgument:
+        if (arg == Arg::callArg(rep.offsetFromSP()))
+            return true;
+        if (arg.isAddr() && code.frameSize()) {
+            if (arg.base() == Tmp(GPRInfo::callFrameRegister)
+                && arg.offset() == rep.offsetFromSP() - code.frameSize())
+                return true;
+            if (arg.base() == Tmp(MacroAssembler::stackPointerRegister)
+                && arg.offset() == rep.offsetFromSP())
+                return true;
+        }
+        return false;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+}
+
+ValueRep StackmapSpecial::repForArg(Code& code, const Arg& arg)
+{
+    switch (arg.kind()) {
+    case Arg::Tmp:
+        return ValueRep::reg(arg.reg());
+        break;
+    case Arg::Imm:
+    case Arg::BigImm:
+        return ValueRep::constant(arg.value());
+        break;
+    case Arg::Addr:
+        if (arg.base() == Tmp(GPRInfo::callFrameRegister))
+            return ValueRep::stack(arg.offset());
+        ASSERT(arg.base() == Tmp(MacroAssembler::stackPointerRegister));
+        return ValueRep::stack(arg.offset() - static_cast(code.frameSize()));
+    default:
+        ASSERT_NOT_REACHED();
+        return ValueRep();
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, StackmapSpecial::RoleMode mode)
+{
+    switch (mode) {
+    case StackmapSpecial::SameAsRep:
+        out.print("SameAsRep");
+        return;
+    case StackmapSpecial::ForceLateUseUnlessRecoverable:
+        out.print("ForceLateUseUnlessRecoverable");
+        return;
+    case StackmapSpecial::ForceLateUse:
+        out.print("ForceLateUse");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3StackmapSpecial.h b/b3/B3StackmapSpecial.h
new file mode 100644
index 0000000..97a0813
--- /dev/null
+++ b/b3/B3StackmapSpecial.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirSpecial.h"
+#include "B3ValueRep.h"
+
+namespace JSC { namespace B3 {
+
+namespace Air { class Code; }
+
+// This is a base class for specials that have stackmaps. Note that it can find the Stackmap by
+// asking for the Inst's origin. Hence, these objects don't need to even hold a reference to the
+// Stackmap.
+
+class StackmapSpecial : public Air::Special {
+public:
+    StackmapSpecial();
+    virtual ~StackmapSpecial();
+
+    enum RoleMode : int8_t {
+        SameAsRep,
+        ForceLateUseUnlessRecoverable,
+        ForceLateUse
+    };
+
+protected:
+    void reportUsedRegisters(Air::Inst&, const RegisterSet&) override;
+    RegisterSet extraEarlyClobberedRegs(Air::Inst&) override;
+    RegisterSet extraClobberedRegs(Air::Inst&) override;
+
+    // Note that this does not override generate() or dumpImpl()/deepDumpImpl(). We have
+    // subclasses that implement those.
+    void forEachArgImpl(
+        unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+        Air::Inst&, RoleMode, std::optional<unsigned> firstRecoverableIndex,
+        const ScopedLambda<Air::Inst::EachArgCallback>&);
+    
+    bool isValidImpl(
+        unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+        Air::Inst&);
+    bool admitsStackImpl(
+        unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs,
+        Air::Inst&, unsigned argIndex);
+
+    // Appends the reps for the Inst's args, starting with numIgnoredArgs, to the given vector.
+    Vector<ValueRep> repsImpl(
+        Air::GenerationContext&, unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, Air::Inst&);
+
+    static bool isArgValidForValue(const Air::Arg&, Value*);
+    static bool isArgValidForRep(Air::Code&, const Air::Arg&, const ValueRep&);
+    static ValueRep repForArg(Air::Code&, const Air::Arg&);
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::B3::StackmapSpecial::RoleMode);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3StackmapValue.cpp b/b3/B3StackmapValue.cpp
new file mode 100644
index 0000000..9b0db2f
--- /dev/null
+++ b/b3/B3StackmapValue.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3StackmapValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+StackmapValue::~StackmapValue()
+{
+}
+
+void StackmapValue::append(Value* value, const ValueRep& rep)
+{
+    if (rep == ValueRep::ColdAny) {
+        children().append(value);
+        return;
+    }
+
+    while (m_reps.size() < numChildren())
+        m_reps.append(ValueRep::ColdAny);
+
+    children().append(value);
+    m_reps.append(rep);
+}
+
+void StackmapValue::appendSomeRegister(Value* value)
+{
+    append(ConstrainedValue(value, ValueRep::SomeRegister));
+}
+
+void StackmapValue::setConstrainedChild(unsigned index, const ConstrainedValue& constrainedValue)
+{
+    child(index) = constrainedValue.value();
+    setConstraint(index, constrainedValue.rep());
+}
+
+void StackmapValue::setConstraint(unsigned index, const ValueRep& rep)
+{
+    if (rep == ValueRep(ValueRep::ColdAny))
+        return;
+
+    while (m_reps.size() <= index)
+        m_reps.append(ValueRep::ColdAny);
+
+    m_reps[index] = rep;
+}
+
+void StackmapValue::dumpChildren(CommaPrinter& comma, PrintStream& out) const
+{
+    for (ConstrainedValue value : constrainedChildren())
+        out.print(comma, value);
+}
+
+void StackmapValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(
+        comma, "generator = ", RawPointer(m_generator.get()), ", earlyClobbered = ", m_earlyClobbered,
+        ", lateClobbered = ", m_lateClobbered, ", usedRegisters = ", m_usedRegisters);
+}
+
+StackmapValue::StackmapValue(CheckedOpcodeTag, Kind kind, Type type, Origin origin)
+    : Value(CheckedOpcode, kind, type, origin)
+{
+    ASSERT(accepts(kind));
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3StackmapValue.h b/b3/B3StackmapValue.h
new file mode 100644
index 0000000..66fc644
--- /dev/null
+++ b/b3/B3StackmapValue.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3ConstrainedValue.h"
+#include "B3Value.h"
+#include "B3ValueRep.h"
+#include "CCallHelpers.h"
+#include "RegisterSet.h"
+#include <wtf/SharedTask.h>
+
+namespace JSC { namespace B3 {
+
+class StackmapGenerationParams;
+
+typedef void StackmapGeneratorFunction(CCallHelpers&, const StackmapGenerationParams&);
+typedef SharedTask<StackmapGeneratorFunction> StackmapGenerator;
+
+class JS_EXPORT_PRIVATE StackmapValue : public Value {
+public:
+    static bool accepts(Kind kind)
+    {
+        // This needs to include opcodes of all subclasses.
+        switch (kind.opcode()) {
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul:
+        case Check:
+        case Patchpoint:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    ~StackmapValue();
+
+    // Use this to add children. Note that you could also add children by doing
+    // children().append(). That will work fine, but it's not recommended.
+    void append(const ConstrainedValue& value)
+    {
+        append(value.value(), value.rep());
+    }
+
+    void append(Value*, const ValueRep&);
+
+    template<typename VectorType>
+    void appendVector(const VectorType& vector)
+    {
+        for (const auto& value : vector)
+            append(value);
+    }
+
+    // Helper for appending a bunch of values with some ValueRep.
+    template<typename VectorType>
+    void appendVectorWithRep(const VectorType& vector, const ValueRep& rep)
+    {
+        for (Value* value : vector)
+            append(value, rep);
+    }
+
+    // Helper for appending cold any's. This is often used by clients to implement OSR.
+    template<typename VectorType>
+    void appendColdAnys(const VectorType& vector)
+    {
+        appendVectorWithRep(vector, ValueRep::ColdAny);
+    }
+    template<typename VectorType>
+    void appendLateColdAnys(const VectorType& vector)
+    {
+        appendVectorWithRep(vector, ValueRep::LateColdAny);
+    }
+
+    // This is a helper for something you might do a lot of: append a value that should be constrained
+    // to SomeRegister.
+    void appendSomeRegister(Value*);
+
+    const Vector<ValueRep>& reps() const { return m_reps; }
+
+    // Stackmaps allow you to specify that the operation may clobber some registers. Clobbering a register
+    // means that the operation appears to store a value into the register, but the compiler doesn't
+    // assume to know anything about what kind of value might have been stored. In B3's model of
+    // execution, registers are read or written at instruction boundaries rather than inside the
+    // instructions themselves. A register could be read or written immediately before the instruction
+    // executes, or immediately after. Note that at a boundary between instruction A and instruction B we
+    // simultaneously look at what A does after it executes and what B does before it executes. This is
+    // because when the compiler considers what happens to registers, it views the boundary between two
+    // instructions as a kind of atomic point where the late effects of A happen at the same time as the
+    // early effects of B.
+    //
+    // The compiler views a stackmap as a single instruction, even though of course the stackmap may be
+    // composed of any number of instructions (if it's a Patchpoint). You can claim that a stackmap value
+    // clobbers a set of registers before the stackmap's instruction or after. Clobbering before is called
+    // early clobber, while clobbering after is called late clobber.
+    //
+    // This is quite flexible but it has its limitations. Any register listed as an early clobber will
+    // interfere with all uses of the stackmap. Any register listed as a late clobber will interfere with
+    // all defs of the stackmap (i.e. the result). This means that it's currently not possible to claim
+    // to clobber a register while still allowing that register to be used for both an input and an output
+    // of the instruction. It just so happens that B3's sole client (the FTL) currently never wants to
+    // convey such a constraint, but it will want it eventually (FIXME:
+    // https://bugs.webkit.org/show_bug.cgi?id=151823).
+    //
+    // Note that a common use case of early clobber sets is to indicate that this is the set of registers
+    // that shall not be used for inputs to the value. But B3 supports two different ways of specifying
+    // this, the other being LateUse in combination with late clobber (not yet available to stackmaps
+    // directly, FIXME: https://bugs.webkit.org/show_bug.cgi?id=151335). A late use makes the use of that
+    // value appear to happen after the instruction. This means that a late use cannot use the same
+    // register as the result and it cannot use the same register as either early or late clobbered
+    // registers. Late uses are usually a better way of saying that a clobbered register cannot be used
+    // for an input. Early clobber means that some register(s) interfere with *all* inputs, while LateUse
+    // means that some value interferes with whatever is live after the instruction. Below is a list of
+    // examples of how the FTL can handle its various kinds of scenarios using a combination of early
+    // clobber, late clobber, and late use. These examples are for X86_64, w.l.o.g.
+    //
+    // Basic ById patchpoint: Early and late clobber of r11. Early clobber prevents any inputs from using
+    // r11 since that would mess with the MacroAssembler's assumptions when we
+    // AllowMacroScratchRegisterUsage. Late clobber tells B3 that the patchpoint may overwrite r11.
+    //
+    // ById patchpoint in a try block with some live state: This might throw an exception after already
+    // assigning to the result. So, this should LateUse all stackmap values to ensure that the stackmap
+    // values don't interfere with the result. Note that we do not LateUse the non-OSR inputs of the ById
+    // since LateUse implies that the use is cold: the register allocator will assume that the use is not
+    // important for the critical path. Also, early and late clobber of r11.
+    //
+    // Basic ByIdFlush patchpoint: We could do Flush the same way we did it with LLVM: ignore it and let
+    // PolymorphicAccess figure it out. Or, we could add internal clobber support (FIXME:
+    // https://bugs.webkit.org/show_bug.cgi?id=151823). Or, we could do it by early clobbering r11, late
+    // clobbering all volatile registers, and constraining the result to some register. Or, we could do
+    // that but leave the result constrained to SomeRegister, which will cause it to use a callee-save
+    // register. Internal clobber support would allow us to use SomeRegister while getting the result into
+    // a volatile register.
+    //
+    // ByIdFlush patchpoint in a try block with some live state: LateUse all for-OSR stackmap values,
+    // early clobber of r11 to prevent the other inputs from using r11, and late clobber of all volatile
+    // registers to make way for the call. To handle the result, we could do any of what is listed in the
+    // previous paragraph.
+    //
+    // Basic JS call: Force all non-OSR inputs into specific locations (register, stack, whatever).
+    // All volatile registers are late-clobbered. The output is constrained to a register as well.
+    //
+    // JS call in a try block with some live state: LateUse all for-OSR stackmap values, fully constrain
+    // all non-OSR inputs and the result, and late clobber all volatile registers.
+    //
+    // JS tail call: Pass all inputs as a warm variant of Any (FIXME:
+    // https://bugs.webkit.org/show_bug.cgi?id=151811).
+    //
+    // Note that we cannot yet do all of these things because although Air already supports all of these
+    // various forms of uses (LateUse and warm unconstrained use), B3 doesn't yet expose all of it. The
+    // bugs are:
+    // https://bugs.webkit.org/show_bug.cgi?id=151335 (LateUse)
+    // https://bugs.webkit.org/show_bug.cgi?id=151811 (warm Any)
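+    //
+    // A small illustrative sketch (not taken from any particular client): reserving a
+    // MacroAssembler scratch register so that neither inputs nor outputs land in it; the register
+    // choice and the "patchpoint" variable are hypothetical:
+    //
+    //     RegisterSet scratchRegs;
+    //     scratchRegs.set(GPRInfo::regT5);
+    //     patchpoint->clobber(scratchRegs); // early + late clobber, see below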
+    void clobberEarly(const RegisterSet& set)
+    {
+        m_earlyClobbered.merge(set);
+    }
+
+    void clobberLate(const RegisterSet& set)
+    {
+        m_lateClobbered.merge(set);
+    }
+
+    void clobber(const RegisterSet& set)
+    {
+        clobberEarly(set);
+        clobberLate(set);
+    }
+
+    RegisterSet& earlyClobbered() { return m_earlyClobbered; }
+    RegisterSet& lateClobbered() { return m_lateClobbered; }
+    const RegisterSet& earlyClobbered() const { return m_earlyClobbered; }
+    const RegisterSet& lateClobbered() const { return m_lateClobbered; }
+
+    void setGenerator(RefPtr<StackmapGenerator> generator)
+    {
+        m_generator = generator;
+    }
+
+    template<typename Functor>
+    void setGenerator(const Functor& functor)
+    {
+        m_generator = createSharedTask<StackmapGeneratorFunction>(functor);
+    }
+
+    RefPtr<StackmapGenerator> generator() const { return m_generator; }
+
+    ConstrainedValue constrainedChild(unsigned index) const
+    {
+        return ConstrainedValue(child(index), index < m_reps.size() ? m_reps[index] : ValueRep::ColdAny);
+    }
+
+    void setConstrainedChild(unsigned index, const ConstrainedValue&);
+    
+    void setConstraint(unsigned index, const ValueRep&);
+
+    class ConstrainedValueCollection {
+    public:
+        ConstrainedValueCollection(const StackmapValue& value)
+            : m_value(value)
+        {
+        }
+
+        unsigned size() const { return m_value.numChildren(); }
+        
+        ConstrainedValue at(unsigned index) const { return m_value.constrainedChild(index); }
+
+        ConstrainedValue operator[](unsigned index) const { return at(index); }
+
+        class iterator {
+        public:
+            iterator()
+                : m_collection(nullptr)
+                , m_index(0)
+            {
+            }
+
+            iterator(const ConstrainedValueCollection& collection, unsigned index)
+                : m_collection(&collection)
+                , m_index(index)
+            {
+            }
+
+            ConstrainedValue operator*() const
+            {
+                return m_collection->at(m_index);
+            }
+
+            iterator& operator++()
+            {
+                m_index++;
+                return *this;
+            }
+
+            bool operator==(const iterator& other) const
+            {
+                ASSERT(m_collection == other.m_collection);
+                return m_index == other.m_index;
+            }
+
+            bool operator!=(const iterator& other) const
+            {
+                return !(*this == other);
+            }
+            
+        private:
+            const ConstrainedValueCollection* m_collection;
+            unsigned m_index;
+        };
+
+        iterator begin() const { return iterator(*this, 0); }
+        iterator end() const { return iterator(*this, size()); }
+
+    private:
+        const StackmapValue& m_value;
+    };
+
+    ConstrainedValueCollection constrainedChildren() const
+    {
+        return ConstrainedValueCollection(*this);
+    }
+
+protected:
+    void dumpChildren(CommaPrinter&, PrintStream&) const override;
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    StackmapValue(CheckedOpcodeTag, Kind, Type, Origin);
+
+private:
+    friend class CheckSpecial;
+    friend class PatchpointSpecial;
+    friend class StackmapGenerationParams;
+    friend class StackmapSpecial;
+    
+    Vector<ValueRep> m_reps;
+    RefPtr<StackmapGenerator> m_generator;
+    RegisterSet m_earlyClobbered;
+    RegisterSet m_lateClobbered;
+    RegisterSet m_usedRegisters; // Stackmaps could be further duplicated by Air, but that's unlikely, so we just merge the used registers sets if that were to happen.
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SuccessorCollection.h b/b3/B3SuccessorCollection.h
new file mode 100644
index 0000000..0a7df24
--- /dev/null
+++ b/b3/B3SuccessorCollection.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+// This is a generic wrapper around lists of frequented blocks, which gives you just the blocks.
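+//
+// For example (sketch): given an accessor that wraps a block's FrequentedBlock successor list in a
+// SuccessorCollection (B3's BasicBlock has one along these lines), you can write
+//
+//     for (BasicBlock* successor : block->successorBlocks()) { ... }
+//
+// instead of unwrapping each FrequentedBlock by hand.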
+
+template<typename BasicBlock, typename SuccessorList>
+class SuccessorCollection {
+public:
+    SuccessorCollection(SuccessorList& list)
+        : m_list(list)
+    {
+    }
+
+    size_t size() const { return m_list.size(); }
+    BasicBlock* at(size_t index) const { return m_list[index].block(); }
+    BasicBlock*& at(size_t index) { return m_list[index].block(); }
+    BasicBlock* operator[](size_t index) const { return at(index); }
+    BasicBlock*& operator[](size_t index) { return at(index); }
+
+    class iterator {
+    public:
+        iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(SuccessorCollection& collection, size_t index)
+            : m_collection(&collection)
+            , m_index(index)
+        {
+        }
+
+        BasicBlock*& operator*() const
+        {
+            return m_collection->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index++;
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        SuccessorCollection* m_collection;
+        size_t m_index;
+    };
+
+    iterator begin() { return iterator(*this, 0); }
+    iterator end() { return iterator(*this, size()); }
+
+    class const_iterator {
+    public:
+        const_iterator()
+            : m_collection(nullptr)
+            , m_index(0)
+        {
+        }
+
+        const_iterator(const SuccessorCollection& collection, size_t index)
+            : m_collection(&collection)
+            , m_index(index)
+        {
+        }
+
+        BasicBlock* operator*() const
+        {
+            return m_collection->at(m_index);
+        }
+
+        const_iterator& operator++()
+        {
+            m_index++;
+            return *this;
+        }
+
+        bool operator==(const const_iterator& other) const
+        {
+            ASSERT(m_collection == other.m_collection);
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const const_iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        const SuccessorCollection* m_collection;
+        size_t m_index;
+    };
+
+    const_iterator begin() const { return const_iterator(*this, 0); }
+    const_iterator end() const { return const_iterator(*this, size()); }
+
+private:
+    SuccessorList& m_list;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SwitchCase.cpp b/b3/B3SwitchCase.cpp
new file mode 100644
index 0000000..d05332b
--- /dev/null
+++ b/b3/B3SwitchCase.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SwitchCase.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+
+namespace JSC { namespace B3 {
+
+void SwitchCase::dump(PrintStream& out) const
+{
+    out.print(m_caseValue, "->", m_target);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SwitchCase.h b/b3/B3SwitchCase.h
new file mode 100644
index 0000000..5ba6a48
--- /dev/null
+++ b/b3/B3SwitchCase.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3FrequentedBlock.h"
+#include <limits.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 {
+
+class SwitchCase {
+public:
+    SwitchCase()
+    {
+    }
+
+    SwitchCase(int64_t caseValue, const FrequentedBlock& target)
+        : m_caseValue(caseValue)
+        , m_target(target)
+    {
+    }
+
+    explicit operator bool() const { return !!m_target; }
+
+    int64_t caseValue() const { return m_caseValue; }
+    FrequentedBlock target() const { return m_target; }
+    BasicBlock* targetBlock() const { return m_target.block(); }
+
+    void dump(PrintStream& out) const;
+
+private:
+    int64_t m_caseValue;
+    FrequentedBlock m_target;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SwitchValue.cpp b/b3/B3SwitchValue.cpp
new file mode 100644
index 0000000..8b88034
--- /dev/null
+++ b/b3/B3SwitchValue.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3SwitchValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlock.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 {
+
+SwitchValue::~SwitchValue()
+{
+}
+
+SwitchCase SwitchValue::removeCase(BasicBlock* block, unsigned index)
+{
+    FrequentedBlock resultBlock = block->successor(index);
+    int64_t resultValue = m_values[index];
+    block->successor(index) = block->successors().last();
+    block->successors().removeLast();
+    m_values[index] = m_values.last();
+    m_values.removeLast();
+    return SwitchCase(resultValue, resultBlock);
+}
+
+bool SwitchValue::hasFallThrough(const BasicBlock* block) const
+{
+    unsigned numSuccessors = block->numSuccessors();
+    unsigned numValues = m_values.size();
+    RELEASE_ASSERT(numValues == numSuccessors || numValues + 1 == numSuccessors);
+    
+    return numValues + 1 == numSuccessors;
+}
+
+bool SwitchValue::hasFallThrough() const
+{
+    return hasFallThrough(owner);
+}
+
+void SwitchValue::setFallThrough(BasicBlock* block, const FrequentedBlock& target)
+{
+    if (!hasFallThrough())
+        block->successors().append(target);
+    else
+        block->successors().last() = target;
+    ASSERT(hasFallThrough(block));
+}
+
+void SwitchValue::appendCase(BasicBlock* block, const SwitchCase& switchCase)
+{
+    if (!hasFallThrough())
+        block->successors().append(switchCase.target());
+    else {
+        block->successors().append(block->successors().last());
+        block->successor(block->numSuccessors() - 2) = switchCase.target();
+    }
+    m_values.append(switchCase.caseValue());
+}
+
+void SwitchValue::setFallThrough(const FrequentedBlock& target)
+{
+    setFallThrough(owner, target);
+}
+
+void SwitchValue::appendCase(const SwitchCase& switchCase)
+{
+    appendCase(owner, switchCase);
+}
+
+void SwitchValue::dumpSuccessors(const BasicBlock* block, PrintStream& out) const
+{
+    // We must not crash due to a number-of-successors mismatch! Someone debugging a
+    // number-of-successors bug will want to dump IR!
+    if (numCaseValues() + 1 != block->numSuccessors()) {
+        Value::dumpSuccessors(block, out);
+        return;
+    }
+    
+    out.print(cases(block));
+}
+
+void SwitchValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, "cases = [", listDump(m_values), "]");
+}
+
+Value* SwitchValue::cloneImpl() const
+{
+    return new SwitchValue(*this);
+}
+
+SwitchValue::SwitchValue(Origin origin, Value* child)
+    : Value(CheckedOpcode, Switch, Void, origin, child)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3SwitchValue.h b/b3/B3SwitchValue.h
new file mode 100644
index 0000000..a1c27cd
--- /dev/null
+++ b/b3/B3SwitchValue.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CaseCollection.h"
+#include "B3SwitchCase.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class SwitchValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Switch; }
+
+    ~SwitchValue();
+
+    // numCaseValues() + 1 == numSuccessors().
+    unsigned numCaseValues() const { return m_values.size(); }
+
+    // The successor for this case value is at the same index.
+    int64_t caseValue(unsigned index) const { return m_values[index]; }
+    
+    const Vector<int64_t>& caseValues() const { return m_values; }
+
+    CaseCollection cases(const BasicBlock* owner) const { return CaseCollection(this, owner); }
+    CaseCollection cases() const { return cases(owner); }
+
+    // This removes the case and reorders things a bit. If you're iterating the cases from 0 to N,
+    // then you can keep iterating after this so long as you revisit this same index (which will now
+    // contain some other case value). This returns the case that was removed.
+    SwitchCase removeCase(BasicBlock*, unsigned index);
+
+    bool hasFallThrough(const BasicBlock*) const;
+    bool hasFallThrough() const;
+
+    // These two functions can be called in any order.
+    void setFallThrough(BasicBlock*, const FrequentedBlock&);
+    void appendCase(BasicBlock*, const SwitchCase&);
+    
+    JS_EXPORT_PRIVATE void setFallThrough(const FrequentedBlock&);
+    JS_EXPORT_PRIVATE void appendCase(const SwitchCase&);
+
+    void dumpSuccessors(const BasicBlock*, PrintStream&) const override;
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    JS_EXPORT_PRIVATE SwitchValue(Origin, Value* child);
+
+    Vector<int64_t> m_values;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3TimingScope.cpp b/b3/B3TimingScope.cpp
new file mode 100644
index 0000000..d8ad421
--- /dev/null
+++ b/b3/B3TimingScope.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3TimingScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include <wtf/CurrentTime.h>
+#include <wtf/DataLog.h>
+
+namespace JSC { namespace B3 {
+
+TimingScope::TimingScope(const char* name)
+    : m_name(name)
+{
+    if (shouldMeasurePhaseTiming())
+        m_before = monotonicallyIncreasingTimeMS();
+}
+
+TimingScope::~TimingScope()
+{
+    if (shouldMeasurePhaseTiming()) {
+        double after = monotonicallyIncreasingTimeMS();
+        dataLog("[B3] ", m_name, " took: ", after - m_before, " ms.\n");
+    }
+}
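+
+// A minimal usage sketch (hypothetical phase name): construct a TimingScope at the top
+// of a scope; when phase timing is enabled, the elapsed time is reported on destruction.
+//
+//     {
+//         TimingScope timingScope("reduceStrength");
+//         // ... run the phase ...
+//     }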
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3TimingScope.h b/b3/B3TimingScope.h
new file mode 100644
index 0000000..a957a0e
--- /dev/null
+++ b/b3/B3TimingScope.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/Noncopyable.h>
+
+namespace JSC { namespace B3 {
+
+class TimingScope {
+    WTF_MAKE_NONCOPYABLE(TimingScope);
+public:
+    TimingScope(const char* name);
+    ~TimingScope();
+
+private:
+    const char* m_name;
+    double m_before;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Type.cpp b/b3/B3Type.cpp
new file mode 100644
index 0000000..0057eaf
--- /dev/null
+++ b/b3/B3Type.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Type.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, Type type)
+{
+    switch (type) {
+    case Void:
+        out.print("Void");
+        return;
+    case Int32:
+        out.print("Int32");
+        return;
+    case Int64:
+        out.print("Int64");
+        return;
+    case Float:
+        out.print("Float");
+        return;
+    case Double:
+        out.print("Double");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Type.h b/b3/B3Type.h
new file mode 100644
index 0000000..4ceaa8a
--- /dev/null
+++ b/b3/B3Type.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Common.h"
+#include <wtf/StdLibExtras.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+enum Type : int8_t {
+    Void,
+    Int32,
+    Int64,
+    Float,
+    Double,
+};
+
+inline bool isInt(Type type)
+{
+    return type == Int32 || type == Int64;
+}
+
+inline bool isFloat(Type type)
+{
+    return type == Float || type == Double;
+}
+
+inline Type pointerType()
+{
+    if (is32Bit())
+        return Int32;
+    return Int64;
+}
+
+inline size_t sizeofType(Type type)
+{
+    switch (type) {
+    case Void:
+        return 0;
+    case Int32:
+    case Float:
+        return 4;
+    case Int64:
+    case Double:
+        return 8;
+    }
+    ASSERT_NOT_REACHED();
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::Type);
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3TypeMap.h b/b3/B3TypeMap.h
new file mode 100644
index 0000000..c0ea413
--- /dev/null
+++ b/b3/B3TypeMap.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Type.h"
+#include <wtf/PrintStream.h>
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+template<typename T>
+class TypeMap {
+public:
+    TypeMap()
+        : m_void()
+        , m_int32()
+        , m_int64()
+        , m_float()
+        , m_double()
+    {
+    }
+    
+    T& at(Type type)
+    {
+        switch (type) {
+        case Void:
+            return m_void;
+        case Int32:
+            return m_int32;
+        case Int64:
+            return m_int64;
+        case Float:
+            return m_float;
+        case Double:
+            return m_double;
+        }
+        ASSERT_NOT_REACHED();
+    }
+    
+    const T& at(Type type) const
+    {
+        return bitwise_cast<TypeMap*>(this)->at(type);
+    }
+    
+    T& operator[](Type type)
+    {
+        return at(type);
+    }
+    
+    const T& operator[](Type type) const
+    {
+        return at(type);
+    }
+    
+    void dump(PrintStream& out) const
+    {
+        out.print(
+            "{void = ", m_void,
+            ", int32 = ", m_int32,
+            ", int64 = ", m_int64,
+            ", float = ", m_float,
+            ", double = ", m_double, "}");
+    }
+    
+private:
+    T m_void;
+    T m_int32;
+    T m_int64;
+    T m_float;
+    T m_double;
+};
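+
+// A minimal usage sketch (hypothetical variable names): TypeMap keeps one slot per
+// B3 type, so per-type statistics can be kept without a hash table.
+//
+//     TypeMap<unsigned> countByType;
+//     for (Value* value : procedure.values())
+//         countByType[value->type()]++;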
+
+} } // namespace JSC::B3
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3UpsilonValue.cpp b/b3/B3UpsilonValue.cpp
new file mode 100644
index 0000000..c87432f
--- /dev/null
+++ b/b3/B3UpsilonValue.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3UpsilonValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+UpsilonValue::~UpsilonValue()
+{
+}
+
+void UpsilonValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    if (m_phi)
+        out.print(comma, "^", m_phi->index());
+    else {
+        // We want to have a dump for when the Phi isn't set yet, since although such IR won't pass
+        // validation, we may have such IR as an intermediate step.
+        out.print(comma, "^(null)");
+    }
+}
+
+Value* UpsilonValue::cloneImpl() const
+{
+    return new UpsilonValue(*this);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3UpsilonValue.h b/b3/B3UpsilonValue.h
new file mode 100644
index 0000000..4c479e4
--- /dev/null
+++ b/b3/B3UpsilonValue.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE UpsilonValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Upsilon; }
+
+    ~UpsilonValue();
+
+    Value* phi() const { return m_phi; }
+    void setPhi(Value* phi)
+    {
+        ASSERT(child(0)->type() == phi->type());
+        ASSERT(phi->opcode() == Phi);
+        m_phi = phi;
+    }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    // Note that passing the Phi during construction is optional. A valid pattern is to first create
+    // the Upsilons without the Phi, then create the Phi, then go back and tell the Upsilons about
+    // the Phi. This allows you to emit code in its natural order.
+    UpsilonValue(Origin origin, Value* value, Value* phi = nullptr)
+        : Value(CheckedOpcode, Upsilon, Void, origin, value)
+        , m_phi(phi)
+    {
+        if (phi)
+            ASSERT(value->type() == phi->type());
+    }
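+
+    // A minimal sketch of that pattern (hypothetical block/value names, assuming the
+    // BasicBlock::appendNew<>() helper from B3BasicBlockInlines.h):
+    //
+    //     UpsilonValue* thenResult = thenBlock->appendNew<UpsilonValue>(proc, origin, thenValue);
+    //     UpsilonValue* elseResult = elseBlock->appendNew<UpsilonValue>(proc, origin, elseValue);
+    //     Value* phi = mergeBlock->appendNew<Value>(proc, Phi, Int32, origin);
+    //     thenResult->setPhi(phi);
+    //     elseResult->setPhi(phi);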
+
+    Value* m_phi;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3UseCounts.cpp b/b3/B3UseCounts.cpp
new file mode 100644
index 0000000..5fe18d4
--- /dev/null
+++ b/b3/B3UseCounts.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3UseCounts.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 {
+
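+// numUses counts every use edge of a value (a value used twice by the same user counts
+// twice); numUsingInstructions counts each distinct user once, which is why the
+// children are sorted and deduplicated below.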
+UseCounts::UseCounts(Procedure& procedure)
+    : m_counts(procedure.values().size())
+{
+    Vector<Value*> children;
+    for (Value* value : procedure.values()) {
+        children.resize(0);
+        for (Value* child : value->children()) {
+            m_counts[child].numUses++;
+            children.append(child);
+        }
+        std::sort(children.begin(), children.end());
+        Value* last = nullptr;
+        for (Value* child : children) {
+            if (child == last)
+                continue;
+
+            m_counts[child].numUsingInstructions++;
+            last = child;
+        }
+    }
+}
+
+UseCounts::~UseCounts()
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3UseCounts.h b/b3/B3UseCounts.h
new file mode 100644
index 0000000..f5a0492
--- /dev/null
+++ b/b3/B3UseCounts.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class UseCounts {
+public:
+    JS_EXPORT_PRIVATE UseCounts(Procedure&);
+    JS_EXPORT_PRIVATE ~UseCounts();
+
+    unsigned numUses(Value* value) const { return m_counts[value].numUses; }
+    unsigned numUsingInstructions(Value* value) const { return m_counts[value].numUsingInstructions; }
+    
+private:
+    struct Counts {
+        unsigned numUses { 0 };
+        unsigned numUsingInstructions { 0 };
+    };
+    
+    IndexMap<Value, Counts> m_counts;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Validate.cpp b/b3/B3Validate.cpp
new file mode 100644
index 0000000..f7224cf
--- /dev/null
+++ b/b3/B3Validate.cpp
@@ -0,0 +1,601 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Validate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3Dominators.h"
+#include "B3MemoryValue.h"
+#include "B3Procedure.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3Variable.h"
+#include "B3VariableValue.h"
+#include "B3WasmBoundsCheckValue.h"
+#include <wtf/HashSet.h>
+#include <wtf/StringPrintStream.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 {
+
+namespace {
+
+class Validater {
+public:
+    Validater(Procedure& procedure, const char* dumpBefore)
+        : m_procedure(procedure)
+        , m_dumpBefore(dumpBefore)
+    {
+    }
+
+#define VALIDATE(condition, message) do {                               \
+        if (condition)                                                  \
+            break;                                                      \
+        fail(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition, toCString message); \
+    } while (false)
+
+    void run()
+    {
+        HashSet<BasicBlock*> blocks;
+        HashSet<Value*> valueInProc;
+        HashMap<Value*, unsigned> valueInBlock;
+        HashMap<Value*, BasicBlock*> valueOwner;
+        HashMap<Value*, unsigned> valueIndex;
+
+        for (BasicBlock* block : m_procedure) {
+            blocks.add(block);
+            for (unsigned i = 0; i < block->size(); ++i) {
+                Value* value = block->at(i);
+                valueInBlock.add(value, 0).iterator->value++;
+                valueOwner.add(value, block);
+                valueIndex.add(value, i);
+            }
+        }
+
+        for (Value* value : m_procedure.values())
+            valueInProc.add(value);
+
+        for (Value* value : valueInProc)
+            VALIDATE(valueInBlock.contains(value), ("At ", *value));
+        for (auto& entry : valueInBlock) {
+            VALIDATE(valueInProc.contains(entry.key), ("At ", *entry.key));
+            VALIDATE(entry.value == 1, ("At ", *entry.key));
+        }
+
+        // Compute dominators ourselves to avoid perturbing Procedure.
+        Dominators dominators(m_procedure);
+
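+        // SSA rule being checked: every use must be dominated by its definition. Within
+        // a single block the child must appear at an earlier index; across blocks the
+        // child's block must dominate the block of the user.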
+        for (Value* value : valueInProc) {
+            for (Value* child : value->children()) {
+                VALIDATE(child, ("At ", *value));
+                VALIDATE(valueInProc.contains(child), ("At ", *value, "->", pointerDump(child)));
+                if (valueOwner.get(child) == valueOwner.get(value))
+                    VALIDATE(valueIndex.get(value) > valueIndex.get(child), ("At ", *value, "->", pointerDump(child)));
+                else
+                    VALIDATE(dominators.dominates(valueOwner.get(child), valueOwner.get(value)), ("at ", *value, "->", pointerDump(child)));
+            }
+        }
+
+        HashMap<BasicBlock*, HashSet<BasicBlock*>> allPredecessors;
+        for (BasicBlock* block : blocks) {
+            VALIDATE(block->size() >= 1, ("At ", *block));
+            for (unsigned i = 0; i < block->size() - 1; ++i)
+                VALIDATE(!block->at(i)->effects().terminal, ("At ", *block->at(i)));
+            VALIDATE(block->last()->effects().terminal, ("At ", *block->last()));
+            
+            for (BasicBlock* successor : block->successorBlocks()) {
+                allPredecessors.add(successor, HashSet<BasicBlock*>()).iterator->value.add(block);
+                VALIDATE(
+                    blocks.contains(successor), ("At ", *block, "->", pointerDump(successor)));
+            }
+        }
+
+        // Note that this totally allows dead code.
+        for (auto& entry : allPredecessors) {
+            BasicBlock* successor = entry.key;
+            HashSet<BasicBlock*>& predecessors = entry.value;
+            VALIDATE(predecessors == successor->predecessors(), ("At ", *successor));
+        }
+
+        for (Value* value : m_procedure.values()) {
+            for (Value* child : value->children())
+                VALIDATE(child->type() != Void, ("At ", *value, "->", *child));
+            switch (value->opcode()) {
+            case Nop:
+            case Fence:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                break;
+            case Identity:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Const32:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case Const64:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Int64, ("At ", *value));
+                break;
+            case ConstDouble:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Double, ("At ", *value));
+                break;
+            case ConstFloat:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Float, ("At ", *value));
+                break;
+            case Set:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->as<VariableValue>()->variable()->type(), ("At ", *value));
+                break;
+            case Get:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == value->as<VariableValue>()->variable()->type(), ("At ", *value));
+                break;
+            case SlotBase:
+            case FramePointer:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == pointerType(), ("At ", *value));
+                break;
+            case ArgumentReg:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(
+                    (value->as<ArgumentRegValue>()->argumentReg().isGPR() ? pointerType() : Double)
+                    == value->type(), ("At ", *value));
+                break;
+            case Add:
+            case Sub:
+            case Mul:
+            case Div:
+            case UDiv:
+            case Mod:
+            case UMod:
+            case BitAnd:
+            case BitXor:
+                VALIDATE(!value->kind().traps(), ("At ", *value));
+                switch (value->opcode()) {
+                case Div:
+                case Mod:
+                    if (value->isChill()) {
+                        VALIDATE(value->opcode() == Div || value->opcode() == Mod, ("At ", *value));
+                        VALIDATE(isInt(value->type()), ("At ", *value));
+                    }
+                    break;
+                default:
+                    VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                    break;
+                }
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Neg:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case BitOr:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(isInt(value->type()), ("At ", *value));
+                break;
+            case Shl:
+            case SShr:
+            case ZShr:
+            case RotR:
+            case RotL:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->type() == value->child(0)->type(), ("At ", *value));
+                VALIDATE(value->child(1)->type() == Int32, ("At ", *value));
+                VALIDATE(isInt(value->type()), ("At ", *value));
+                break;
+            case BitwiseCast:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->type() != value->child(0)->type(), ("At ", *value));
+                VALIDATE(
+                    (value->type() == Int64 && value->child(0)->type() == Double)
+                    || (value->type() == Double && value->child(0)->type() == Int64)
+                    || (value->type() == Float && value->child(0)->type() == Int32)
+                    || (value->type() == Int32 && value->child(0)->type() == Float),
+                    ("At ", *value));
+                break;
+            case SExt8:
+            case SExt16:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case SExt32:
+            case ZExt32:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(value->type() == Int64, ("At ", *value));
+                break;
+            case Clz:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(isInt(value->type()), ("At ", *value));
+                break;
+            case Trunc:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(
+                    (value->type() == Int32 && value->child(0)->type() == Int64)
+                    || (value->type() == Float && value->child(0)->type() == Double),
+                    ("At ", *value));
+                break;
+            case Abs:
+            case Ceil:
+            case Floor:
+            case Sqrt:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isFloat(value->child(0)->type()), ("At ", *value));
+                VALIDATE(isFloat(value->type()), ("At ", *value));
+                break;
+            case IToD:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Double, ("At ", *value));
+                break;
+            case IToF:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Float, ("At ", *value));
+                break;
+            case FloatToDouble:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Float, ("At ", *value));
+                VALIDATE(value->type() == Double, ("At ", *value));
+                break;
+            case DoubleToFloat:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Double, ("At ", *value));
+                VALIDATE(value->type() == Float, ("At ", *value));
+                break;
+            case Equal:
+            case NotEqual:
+            case LessThan:
+            case GreaterThan:
+            case LessEqual:
+            case GreaterEqual:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case Above:
+            case Below:
+            case AboveEqual:
+            case BelowEqual:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case EqualOrUnordered:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(isFloat(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                break;
+            case Select:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 3, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == value->child(1)->type(), ("At ", *value));
+                VALIDATE(value->type() == value->child(2)->type(), ("At ", *value));
+                break;
+            case Load8Z:
+            case Load8S:
+            case Load16Z:
+            case Load16S:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == Int32, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case Load:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case Store8:
+            case Store16:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(value->child(1)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case Store:
+                VALIDATE(!value->kind().isChill(), ("At ", *value));
+                VALIDATE(value->numChildren() == 2, ("At ", *value));
+                VALIDATE(value->child(1)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                validateStackAccess(value);
+                break;
+            case WasmAddress:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                VALIDATE(value->type() == pointerType(), ("At ", *value));
+                break;
+            case CCall:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() >= 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value));
+                break;
+            case Patchpoint:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                if (value->type() == Void)
+                    VALIDATE(value->as<PatchpointValue>()->resultConstraint == ValueRep::WarmAny, ("At ", *value));
+                else {
+                    switch (value->as<PatchpointValue>()->resultConstraint.kind()) {
+                    case ValueRep::WarmAny:
+                    case ValueRep::SomeRegister:
+                    case ValueRep::SomeEarlyRegister:
+                    case ValueRep::Register:
+                    case ValueRep::StackArgument:
+                        break;
+                    default:
+                        VALIDATE(false, ("At ", *value));
+                        break;
+                    }
+                    
+                    validateStackmapConstraint(value, ConstrainedValue(value, value->as<PatchpointValue>()->resultConstraint), ConstraintRole::Def);
+                }
+                validateStackmap(value);
+                break;
+            case CheckAdd:
+            case CheckSub:
+            case CheckMul:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() >= 2, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(isInt(value->child(1)->type()), ("At ", *value));
+                VALIDATE(value->as<CheckValue>()->constrainedChild(0).rep() == ValueRep::WarmAny, ("At ", *value));
+                VALIDATE(value->as<CheckValue>()->constrainedChild(1).rep() == ValueRep::WarmAny, ("At ", *value));
+                validateStackmap(value);
+                break;
+            case Check:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() >= 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->as<CheckValue>()->constrainedChild(0).rep() == ValueRep::WarmAny, ("At ", *value));
+                validateStackmap(value);
+                break;
+            case WasmBoundsCheck:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->child(0)->type() == Int32, ("At ", *value));
+                VALIDATE(m_procedure.code().isPinned(value->as<WasmBoundsCheckValue>()->pinnedGPR()), ("At ", *value));
+                VALIDATE(m_procedure.code().wasmBoundsCheckGenerator(), ("At ", *value));
+                break;
+            case Upsilon:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(value->as<UpsilonValue>()->phi(), ("At ", *value));
+                VALIDATE(value->as<UpsilonValue>()->phi()->opcode() == Phi, ("At ", *value));
+                VALIDATE(value->child(0)->type() == value->as<UpsilonValue>()->phi()->type(), ("At ", *value));
+                VALIDATE(valueInProc.contains(value->as<UpsilonValue>()->phi()), ("At ", *value));
+                break;
+            case Phi:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() != Void, ("At ", *value));
+                break;
+            case Jump:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(valueOwner.get(value)->numSuccessors() == 1, ("At ", *value));
+                break;
+            case Oops:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(!valueOwner.get(value)->numSuccessors(), ("At ", *value));
+                break;
+            case Return:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() <= 1, ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(!valueOwner.get(value)->numSuccessors(), ("At ", *value));
+                break;
+            case Branch:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(valueOwner.get(value)->numSuccessors() == 2, ("At ", *value));
+                break;
+            case Switch: {
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(value->numChildren() == 1, ("At ", *value));
+                VALIDATE(isInt(value->child(0)->type()), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(value->as<SwitchValue>()->hasFallThrough(valueOwner.get(value)), ("At ", *value));
+                // This validates the same thing as hasFallThrough, but more explicitly. We want to
+                // make sure that if anyone tries to change the definition of hasFallThrough, they
+                // will feel some pain here, since this is fundamental.
+                VALIDATE(valueOwner.get(value)->numSuccessors() == value->as<SwitchValue>()->numCaseValues() + 1, ("At ", *value));
+                
+                // Check that there are no duplicate cases.
+                Vector<int64_t> caseValues = value->as<SwitchValue>()->caseValues();
+                std::sort(caseValues.begin(), caseValues.end());
+                for (unsigned i = 1; i < caseValues.size(); ++i)
+                    VALIDATE(caseValues[i - 1] != caseValues[i], ("At ", *value, ", caseValue = ", caseValues[i]));
+                break;
+            }
+            case EntrySwitch:
+                VALIDATE(!value->kind().hasExtraBits(), ("At ", *value));
+                VALIDATE(!value->numChildren(), ("At ", *value));
+                VALIDATE(value->type() == Void, ("At ", *value));
+                VALIDATE(valueOwner.get(value)->numSuccessors() == m_procedure.numEntrypoints(), ("At ", *value));
+                break;
+            }
+
+            VALIDATE(!(value->effects().writes && value->key()), ("At ", *value));
+        }
+
+        for (Variable* variable : m_procedure.variables())
+            VALIDATE(variable->type() != Void, ("At ", *variable));
+    }
+
+private:
+    void validateStackmap(Value* value)
+    {
+        StackmapValue* stackmap = value->as<StackmapValue>();
+        VALIDATE(stackmap, ("At ", *value));
+        VALIDATE(stackmap->numChildren() >= stackmap->reps().size(), ("At ", *stackmap));
+        for (ConstrainedValue child : stackmap->constrainedChildren())
+            validateStackmapConstraint(stackmap, child);
+    }
+    
+    enum class ConstraintRole {
+        Use,
+        Def
+    };
+    void validateStackmapConstraint(Value* context, const ConstrainedValue& value, ConstraintRole role = ConstraintRole::Use)
+    {
+        switch (value.rep().kind()) {
+        case ValueRep::WarmAny:
+        case ValueRep::ColdAny:
+        case ValueRep::LateColdAny:
+        case ValueRep::SomeRegister:
+        case ValueRep::StackArgument:
+            break;
+        case ValueRep::SomeEarlyRegister:
+            VALIDATE(role == ConstraintRole::Def, ("At ", *context, ": ", value));
+            break;
+        case ValueRep::Register:
+        case ValueRep::LateRegister:
+            if (value.rep().reg().isGPR())
+                VALIDATE(isInt(value.value()->type()), ("At ", *context, ": ", value));
+            else
+                VALIDATE(isFloat(value.value()->type()), ("At ", *context, ": ", value));
+            break;
+        default:
+            VALIDATE(false, ("At ", *context, ": ", value));
+            break;
+        }
+    }
+
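+    // Loads and stores whose address is a SlotBase must stay within the bounds of the
+    // underlying stack slot; accesses through arbitrary pointers are not checked here.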
+    void validateStackAccess(Value* value)
+    {
+        MemoryValue* memory = value->as<MemoryValue>();
+        SlotBaseValue* slotBase = value->lastChild()->as<SlotBaseValue>();
+        if (!slotBase)
+            return;
+
+        StackSlot* stack = slotBase->slot();
+
+        VALIDATE(memory->offset() >= 0, ("At ", *value));
+        VALIDATE(memory->offset() + memory->accessByteSize() <= stack->byteSize(), ("At ", *value));
+    }
+    
+    NO_RETURN_DUE_TO_CRASH void fail(
+        const char* filename, int lineNumber, const char* function, const char* condition,
+        CString message)
+    {
+        CString failureMessage;
+        {
+            StringPrintStream out;
+            out.print("B3 VALIDATION FAILURE\n");
+            out.print("    ", condition, " (", filename, ":", lineNumber, ")\n");
+            out.print("    ", message, "\n");
+            out.print("    After ", m_procedure.lastPhaseName(), "\n");
+            failureMessage = out.toCString();
+        }
+
+        dataLog(failureMessage);
+        if (m_dumpBefore) {
+            dataLog("Before ", m_procedure.lastPhaseName(), ":\n");
+            dataLog(m_dumpBefore);
+        }
+        dataLog("At time of failure:\n");
+        dataLog(m_procedure);
+
+        dataLog(failureMessage);
+        WTFReportAssertionFailure(filename, lineNumber, function, condition);
+        CRASH();
+    }
+    
+    Procedure& m_procedure;
+    const char* m_dumpBefore;
+};
+
+} // anonymous namespace
+
+void validate(Procedure& procedure, const char* dumpBefore)
+{
+    Validater validater(procedure, dumpBefore);
+    validater.run();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Validate.h b/b3/B3Validate.h
new file mode 100644
index 0000000..d115e22
--- /dev/null
+++ b/b3/B3Validate.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+JS_EXPORT_PRIVATE void validate(Procedure&, const char* dumpBefore = nullptr);
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Value.cpp b/b3/B3Value.cpp
new file mode 100644
index 0000000..b4fc433
--- /dev/null
+++ b/b3/B3Value.cpp
@@ -0,0 +1,870 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Value.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3BottomProvider.h"
+#include "B3CCallValue.h"
+#include "B3FenceValue.h"
+#include "B3MemoryValue.h"
+#include "B3OriginDump.h"
+#include "B3ProcedureInlines.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3UpsilonValue.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+#include "B3VariableValue.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC { namespace B3 {
+
+const char* const Value::dumpPrefix = "@";
+
+Value::~Value()
+{
+}
+
+void Value::replaceWithIdentity(Value* value)
+{
+    // This is a bit crazy. It does an in-place replacement of whatever Value subclass this is with
+    // a plain Identity Value. We first collect all of the information we need, then we destruct the
+    // previous value in place, and then we construct the Identity Value in place.
+
+    ASSERT(m_type == value->m_type);
+
+    if (m_type == Void) {
+        replaceWithNopIgnoringType();
+        return;
+    }
+
+    unsigned index = m_index;
+    Type type = m_type;
+    Origin origin = m_origin;
+    BasicBlock* owner = this->owner;
+
+    RELEASE_ASSERT(type == value->type());
+
+    this->~Value();
+
+    new (this) Value(Identity, type, origin, value);
+
+    this->owner = owner;
+    this->m_index = index;
+}
+
+void Value::replaceWithBottom(InsertionSet& insertionSet, size_t index)
+{
+    replaceWithBottom(BottomProvider(insertionSet, index));
+}
+
+void Value::replaceWithNop()
+{
+    RELEASE_ASSERT(m_type == Void);
+    replaceWithNopIgnoringType();
+}
+
+void Value::replaceWithNopIgnoringType()
+{
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    BasicBlock* owner = this->owner;
+
+    this->~Value();
+
+    new (this) Value(Nop, Void, origin);
+
+    this->owner = owner;
+    this->m_index = index;
+}
+
+void Value::replaceWithPhi()
+{
+    if (m_type == Void) {
+        replaceWithNop();
+        return;
+    }
+    
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    BasicBlock* owner = this->owner;
+    Type type = m_type;
+
+    this->~Value();
+
+    new (this) Value(Phi, type, origin);
+
+    this->owner = owner;
+    this->m_index = index;
+}
+
+void Value::replaceWithJump(BasicBlock* owner, FrequentedBlock target)
+{
+    RELEASE_ASSERT(owner->last() == this);
+    
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    
+    this->~Value();
+    
+    new (this) Value(Jump, Void, origin);
+    
+    this->owner = owner;
+    this->m_index = index;
+    
+    owner->setSuccessors(target);
+}
+
+void Value::replaceWithOops(BasicBlock* owner)
+{
+    RELEASE_ASSERT(owner->last() == this);
+    
+    unsigned index = m_index;
+    Origin origin = m_origin;
+    
+    this->~Value();
+    
+    new (this) Value(Oops, Void, origin);
+    
+    this->owner = owner;
+    this->m_index = index;
+    
+    owner->clearSuccessors();
+}
+
+void Value::replaceWithJump(FrequentedBlock target)
+{
+    replaceWithJump(owner, target);
+}
+
+void Value::replaceWithOops()
+{
+    replaceWithOops(owner);
+}
+
+void Value::dump(PrintStream& out) const
+{
+    bool isConstant = false;
+
+    switch (opcode()) {
+    case Const32:
+        out.print("$", asInt32(), "(");
+        isConstant = true;
+        break;
+    case Const64:
+        out.print("$", asInt64(), "(");
+        isConstant = true;
+        break;
+    case ConstFloat:
+        out.print("$", asFloat(), "(");
+        isConstant = true;
+        break;
+    case ConstDouble:
+        out.print("$", asDouble(), "(");
+        isConstant = true;
+        break;
+    default:
+        break;
+    }
+    
+    out.print(dumpPrefix, m_index);
+
+    if (isConstant)
+        out.print(")");
+}
+
+Value* Value::cloneImpl() const
+{
+    return new Value(*this);
+}
+
+void Value::dumpChildren(CommaPrinter& comma, PrintStream& out) const
+{
+    for (Value* child : children())
+        out.print(comma, pointerDump(child));
+}
+
+void Value::deepDump(const Procedure* proc, PrintStream& out) const
+{
+    out.print(m_type, " ", dumpPrefix, m_index, " = ", m_kind);
+
+    out.print("(");
+    CommaPrinter comma;
+    dumpChildren(comma, out);
+
+    if (m_origin)
+        out.print(comma, OriginDump(proc, m_origin));
+
+    dumpMeta(comma, out);
+
+    {
+        CString string = toCString(effects());
+        if (string.length())
+            out.print(comma, string);
+    }
+
+    out.print(")");
+}
+
+void Value::dumpSuccessors(const BasicBlock* block, PrintStream& out) const
+{
+    // Note that this must not crash if we have the wrong number of successors, since someone
+    // debugging a number-of-successors bug will probably want to dump IR!
+    
+    if (opcode() == Branch && block->numSuccessors() == 2) {
+        out.print("Then:", block->taken(), ", Else:", block->notTaken());
+        return;
+    }
+    
+    out.print(listDump(block->successors()));
+}
+
+Value* Value::negConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::addConstant(Procedure&, int32_t) const
+{
+    return nullptr;
+}
+
+Value* Value::addConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::subConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::mulConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkAddConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkSubConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkMulConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::checkNegConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::divConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::uDivConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::modConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::uModConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitAndConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitOrConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitXorConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::shlConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::sShrConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::zShrConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::rotRConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::rotLConstant(Procedure&, const Value*) const
+{
+    return nullptr;
+}
+
+Value* Value::bitwiseCastConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::iToDConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::iToFConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::doubleToFloatConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::floatToDoubleConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::absConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::ceilConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::floorConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+Value* Value::sqrtConstant(Procedure&) const
+{
+    return nullptr;
+}
+
+TriState Value::equalConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::notEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::lessThanConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::greaterThanConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::lessEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::greaterEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::aboveConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::belowConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::aboveEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::belowEqualConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+TriState Value::equalOrUnorderedConstant(const Value*) const
+{
+    return MixedTriState;
+}
+
+Value* Value::invertedCompare(Procedure& proc) const
+{
+    if (!numChildren())
+        return nullptr;
+    if (std::optional<Opcode> invertedOpcode = B3::invertedCompare(opcode(), child(0)->type())) {
+        ASSERT(!kind().hasExtraBits());
+        return proc.add<Value>(*invertedOpcode, type(), origin(), children());
+    }
+    return nullptr;
+}
+
+bool Value::isRounded() const
+{
+    ASSERT(isFloat(type()));
+    switch (opcode()) {
+    case Floor:
+    case Ceil:
+    case IToD:
+    case IToF:
+        return true;
+
+    case ConstDouble: {
+        double value = asDouble();
+        return std::isfinite(value) && value == ceil(value);
+    }
+
+    case ConstFloat: {
+        float value = asFloat();
+        return std::isfinite(value) && value == ceilf(value);
+    }
+
+    default:
+        return false;
+    }
+}
+
+bool Value::returnsBool() const
+{
+    if (type() != Int32)
+        return false;
+    switch (opcode()) {
+    case Const32:
+        return asInt32() == 0 || asInt32() == 1;
+    case BitAnd:
+        return child(1)->isInt32(1)
+            || (child(0)->returnsBool() && child(1)->hasInt() && child(1)->asInt() & 1);
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case LessEqual:
+    case GreaterEqual:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+        return true;
+    case Phi:
+        // FIXME: We should have a story here.
+        // https://bugs.webkit.org/show_bug.cgi?id=150725
+        return false;
+    default:
+        return false;
+    }
+}
+
+TriState Value::asTriState() const
+{
+    switch (opcode()) {
+    case Const32:
+        return triState(!!asInt32());
+    case Const64:
+        return triState(!!asInt64());
+    case ConstDouble:
+        // Use "!= 0" to really emphasize what this mean with respect to NaN and such.
+        return triState(asDouble() != 0);
+    case ConstFloat:
+        return triState(asFloat() != 0.);
+    default:
+        return MixedTriState;
+    }
+}
+
+Effects Value::effects() const
+{
+    Effects result;
+    switch (opcode()) {
+    case Nop:
+    case Identity:
+    case Const32:
+    case Const64:
+    case ConstDouble:
+    case ConstFloat:
+    case SlotBase:
+    case ArgumentReg:
+    case FramePointer:
+    case Add:
+    case Sub:
+    case Mul:
+    case Neg:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Clz:
+    case Abs:
+    case Ceil:
+    case Floor:
+    case Sqrt:
+    case BitwiseCast:
+    case SExt8:
+    case SExt16:
+    case SExt32:
+    case ZExt32:
+    case Trunc:
+    case IToD:
+    case IToF:
+    case FloatToDouble:
+    case DoubleToFloat:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case LessEqual:
+    case GreaterEqual:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+    case Select:
+        break;
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+        result.controlDependent = true;
+        break;
+    case Load8Z:
+    case Load8S:
+    case Load16Z:
+    case Load16S:
+    case Load:
+        result.reads = as<MemoryValue>()->range();
+        result.controlDependent = true;
+        break;
+    case Store8:
+    case Store16:
+    case Store:
+        result.writes = as<MemoryValue>()->range();
+        result.controlDependent = true;
+        break;
+    case WasmAddress:
+        result.readsPinned = true;
+        break;
+    case Fence: {
+        const FenceValue* fence = as<FenceValue>();
+        result.reads = fence->read;
+        result.writes = fence->write;
+        
+        // Prevent killing of fences that claim not to write anything. It's a bit weird that we use
+        // local state as the way to do this, but it happens to work: we must assume that we cannot
+        // kill writesLocalState unless we understand exactly what the instruction is doing (like
+        // the way that fixSSA understands Set/Get and the way that reduceStrength and others
+        // understand Upsilon). This would only become a problem if we had some analysis that was
+        // looking to use the writesLocalState bit to invalidate a CSE over local state operations.
+        // Then a Fence would block, say, the elimination of a redundant Get. But that's not at all
+        // how our optimizations for Set/Get/Upsilon/Phi work - they grok their operations deeply
+        // enough that they have no need to check this bit - so this cheat is fine.
+        result.writesLocalState = true;
+        break;
+    }
+    case CCall:
+        result = as<CCallValue>()->effects;
+        break;
+    case Patchpoint:
+        result = as<PatchpointValue>()->effects;
+        break;
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+    case Check:
+        result = Effects::forCheck();
+        break;
+    case WasmBoundsCheck:
+        result.readsPinned = true;
+        result.exitsSideways = true;
+        break;
+    case Upsilon:
+    case Set:
+        result.writesLocalState = true;
+        break;
+    case Phi:
+    case Get:
+        result.readsLocalState = true;
+        break;
+    case Jump:
+    case Branch:
+    case Switch:
+    case Return:
+    case Oops:
+    case EntrySwitch:
+        result.terminal = true;
+        break;
+    }
+    if (traps()) {
+        result.exitsSideways = true;
+        result.reads = HeapRange::top();
+    }
+    return result;
+}
+
+ValueKey Value::key() const
+{
+    switch (opcode()) {
+    case FramePointer:
+        return ValueKey(kind(), type());
+    case Identity:
+    case Abs:
+    case Ceil:
+    case Floor:
+    case Sqrt:
+    case SExt8:
+    case SExt16:
+    case SExt32:
+    case ZExt32:
+    case Clz:
+    case Trunc:
+    case IToD:
+    case IToF:
+    case FloatToDouble:
+    case DoubleToFloat:
+    case Check:
+    case BitwiseCast:
+    case Neg:
+        return ValueKey(kind(), type(), child(0));
+    case Add:
+    case Sub:
+    case Mul:
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return ValueKey(kind(), type(), child(0), child(1));
+    case Select:
+        return ValueKey(kind(), type(), child(0), child(1), child(2));
+    case Const32:
+        return ValueKey(Const32, type(), static_cast<int64_t>(asInt32()));
+    case Const64:
+        return ValueKey(Const64, type(), asInt64());
+    case ConstDouble:
+        return ValueKey(ConstDouble, type(), asDouble());
+    case ConstFloat:
+        return ValueKey(ConstFloat, type(), asFloat());
+    case ArgumentReg:
+        return ValueKey(
+            ArgumentReg, type(),
+            static_cast<int64_t>(as<ArgumentRegValue>()->argumentReg().index()));
+    case SlotBase:
+        return ValueKey(
+            SlotBase, type(),
+            static_cast<int64_t>(as<SlotBaseValue>()->slot()->index()));
+    default:
+        return ValueKey();
+    }
+}
+
+void Value::performSubstitution()
+{
+    for (Value*& child : children()) {
+        while (child->opcode() == Identity)
+            child = child->child(0);
+    }
+}
+
+bool Value::isFree() const
+{
+    switch (opcode()) {
+    case Const32:
+    case Const64:
+    case ConstDouble:
+    case ConstFloat:
+    case Identity:
+    case Nop:
+        return true;
+    default:
+        return false;
+    }
+}
+
+void Value::dumpMeta(CommaPrinter&, PrintStream&) const
+{
+}
+
+Type Value::typeFor(Kind kind, Value* firstChild, Value* secondChild)
+{
+    switch (kind.opcode()) {
+    case Identity:
+    case Add:
+    case Sub:
+    case Mul:
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+    case Neg:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Clz:
+    case Abs:
+    case Ceil:
+    case Floor:
+    case Sqrt:
+    case CheckAdd:
+    case CheckSub:
+    case CheckMul:
+        return firstChild->type();
+    case FramePointer:
+        return pointerType();
+    case SExt8:
+    case SExt16:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case LessEqual:
+    case GreaterEqual:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+    case EqualOrUnordered:
+        return Int32;
+    case Trunc:
+        return firstChild->type() == Int64 ? Int32 : Float;
+    case SExt32:
+    case ZExt32:
+        return Int64;
+    case FloatToDouble:
+    case IToD:
+        return Double;
+    case DoubleToFloat:
+    case IToF:
+        return Float;
+    case BitwiseCast:
+        switch (firstChild->type()) {
+        case Int64:
+            return Double;
+        case Double:
+            return Int64;
+        case Int32:
+            return Float;
+        case Float:
+            return Int32;
+        case Void:
+            ASSERT_NOT_REACHED();
+        }
+        return Void;
+    case Nop:
+    case Jump:
+    case Branch:
+    case Return:
+    case Oops:
+    case EntrySwitch:
+    case WasmBoundsCheck:
+        return Void;
+    case Select:
+        ASSERT(secondChild);
+        return secondChild->type();
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
+void Value::badKind(Kind kind, unsigned numArgs)
+{
+    dataLog("Bad kind ", kind, " with ", numArgs, " args.\n");
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3Value.h b/b3/B3Value.h
new file mode 100644
index 0000000..ebe52ad
--- /dev/null
+++ b/b3/B3Value.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "B3Effects.h"
+#include "B3FrequentedBlock.h"
+#include "B3Kind.h"
+#include "B3Origin.h"
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include "B3ValueKey.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/TriState.h>
+
+namespace JSC { namespace B3 {
+
+class BasicBlock;
+class CheckValue;
+class InsertionSet;
+class PhiChildren;
+class Procedure;
+
+class JS_EXPORT_PRIVATE Value {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    typedef Vector<Value*, 3> AdjacencyList;
+
+    static const char* const dumpPrefix;
+
+    static bool accepts(Kind) { return true; }
+
+    virtual ~Value();
+
+    unsigned index() const { return m_index; }
+    
+    // Note that the kind is immutable, except for replacing values with:
+    // Identity, Nop, Oops, Jump, and Phi. See below for replaceWithXXX() methods.
+    Kind kind() const { return m_kind; }
+    
+    Opcode opcode() const { return kind().opcode(); }
+    
+    // It's good practice to mirror Kind methods here, so you can say value->isBlah()
+    // instead of value->kind().isBlah().
+    bool isChill() const { return kind().isChill(); }
+    bool traps() const { return kind().traps(); }
+
+    Origin origin() const { return m_origin; }
+    void setOrigin(Origin origin) { m_origin = origin; }
+    
+    Value*& child(unsigned index) { return m_children[index]; }
+    Value* child(unsigned index) const { return m_children[index]; }
+
+    Value*& lastChild() { return m_children.last(); }
+    Value* lastChild() const { return m_children.last(); }
+
+    unsigned numChildren() const { return m_children.size(); }
+
+    Type type() const { return m_type; }
+    void setType(Type type) { m_type = type; }
+
+    // This is useful when lowering. Note that this is only valid for non-void values.
+    Air::Arg::Type airType() const { return Air::Arg::typeForB3Type(type()); }
+    Air::Arg::Width airWidth() const { return Air::Arg::widthForB3Type(type()); }
+
+    AdjacencyList& children() { return m_children; } 
+    const AdjacencyList& children() const { return m_children; }
+
+    // If you want to replace all uses of this value with a different value, then replace this
+    // value with Identity. Then do a pass of performSubstitution() on all of the values that use
+    // this one. Usually we do all of this in one pass in pre-order, which ensures that the
+    // X->replaceWithIdentity() calls happen before the performSubstitution() calls on X's users.
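+    //
+    // A minimal sketch of that pass (illustrative only: blocksInPreOrder() is assumed to exist on
+    // Procedure, and shouldReplace()/replacementFor() are hypothetical helpers of the caller):
+    //
+    //     for (BasicBlock* block : proc.blocksInPreOrder()) {
+    //         for (Value* value : *block) {
+    //             value->performSubstitution(); // Repoint children past already-inserted Identities.
+    //             if (shouldReplace(value))
+    //                 value->replaceWithIdentity(replacementFor(value));
+    //         }
+    //     }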
+    void replaceWithIdentity(Value*);
+    
+    // It's often necessary to kill a value. It's tempting to replace the value with Nop or to
+    // just remove it. But unless you are sure that the value is Void, you will probably still
+    // have other values that use this one. Sure, you may kill those later, or you might not. This
+    // method lets you kill a value safely. It will replace Void values with Nop and non-Void
+    // values with Identities on bottom constants. For this reason, this takes a callback that is
+    // responsible for creating bottoms. There's a utility for this, see B3BottomProvider.h. You
+    // can also access that utility using replaceWithBottom(InsertionSet&, size_t).
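+    //
+    // A typical call while walking a block looks like this (sketch; insertionSet, valueIndex, and
+    // isDead() are assumed to come from the surrounding pass):
+    //
+    //     if (isDead(value))
+    //         value->replaceWithBottom(insertionSet, valueIndex);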
+    template<typename BottomProvider>
+    void replaceWithBottom(const BottomProvider&);
+    
+    void replaceWithBottom(InsertionSet&, size_t index);
+
+    // Use this if you want to kill a value and you are sure that the value is Void.
+    void replaceWithNop();
+    
+    // Use this if you want to kill a value and you are sure that nobody is using it anymore.
+    void replaceWithNopIgnoringType();
+    
+    void replaceWithPhi();
+    
+    // These transformations are only valid for terminals.
+    void replaceWithJump(BasicBlock* owner, FrequentedBlock);
+    void replaceWithOops(BasicBlock* owner);
+    
+    // You can use this form if owners are valid. They're usually not valid.
+    void replaceWithJump(FrequentedBlock);
+    void replaceWithOops();
+
+    void dump(PrintStream&) const;
+    void deepDump(const Procedure*, PrintStream&) const;
+    
+    virtual void dumpSuccessors(const BasicBlock*, PrintStream&) const;
+
+    // This is how you cast Values. For example, if you want to do something provided that we have an
+    // ArgumentRegValue, you can do:
+    //
+    // if (ArgumentRegValue* argumentReg = value->as<ArgumentRegValue>()) {
+    //     things
+    // }
+    //
+    // This will return null if this kind() != ArgumentReg. This works because this returns nullptr
+    // if T::accepts(kind()) returns false.
+    template<typename T>
+    T* as();
+    template<typename T>
+    const T* as() const;
+
+    // What follows are a bunch of helpers for inspecting and modifying values. Note that we have a
+    // bunch of different idioms for implementing such helpers. You can use virtual methods, and
+    // override from the various Value subclasses. You can put the method inside Value and make it
+    // non-virtual, and the implementation can switch on kind. The method could be inline or not.
+    // If a method is specific to some Value subclass, you could put it in the subclass, or you could
+    // put it on Value anyway. It's fine to pick whatever feels right, and we shouldn't restrict
+    // ourselves to any particular idiom.
+
+    bool isConstant() const;
+    bool isInteger() const;
+    
+    virtual Value* negConstant(Procedure&) const;
+    virtual Value* addConstant(Procedure&, int32_t other) const;
+    virtual Value* addConstant(Procedure&, const Value* other) const;
+    virtual Value* subConstant(Procedure&, const Value* other) const;
+    virtual Value* mulConstant(Procedure&, const Value* other) const;
+    virtual Value* checkAddConstant(Procedure&, const Value* other) const;
+    virtual Value* checkSubConstant(Procedure&, const Value* other) const;
+    virtual Value* checkMulConstant(Procedure&, const Value* other) const;
+    virtual Value* checkNegConstant(Procedure&) const;
+    virtual Value* divConstant(Procedure&, const Value* other) const; // This chooses Div semantics for integers.
+    virtual Value* uDivConstant(Procedure&, const Value* other) const;
+    virtual Value* modConstant(Procedure&, const Value* other) const; // This chooses Mod semantics.
+    virtual Value* uModConstant(Procedure&, const Value* other) const;
+    virtual Value* bitAndConstant(Procedure&, const Value* other) const;
+    virtual Value* bitOrConstant(Procedure&, const Value* other) const;
+    virtual Value* bitXorConstant(Procedure&, const Value* other) const;
+    virtual Value* shlConstant(Procedure&, const Value* other) const;
+    virtual Value* sShrConstant(Procedure&, const Value* other) const;
+    virtual Value* zShrConstant(Procedure&, const Value* other) const;
+    virtual Value* rotRConstant(Procedure&, const Value* other) const;
+    virtual Value* rotLConstant(Procedure&, const Value* other) const;
+    virtual Value* bitwiseCastConstant(Procedure&) const;
+    virtual Value* iToDConstant(Procedure&) const;
+    virtual Value* iToFConstant(Procedure&) const;
+    virtual Value* doubleToFloatConstant(Procedure&) const;
+    virtual Value* floatToDoubleConstant(Procedure&) const;
+    virtual Value* absConstant(Procedure&) const;
+    virtual Value* ceilConstant(Procedure&) const;
+    virtual Value* floorConstant(Procedure&) const;
+    virtual Value* sqrtConstant(Procedure&) const;
+
+    virtual TriState equalConstant(const Value* other) const;
+    virtual TriState notEqualConstant(const Value* other) const;
+    virtual TriState lessThanConstant(const Value* other) const;
+    virtual TriState greaterThanConstant(const Value* other) const;
+    virtual TriState lessEqualConstant(const Value* other) const;
+    virtual TriState greaterEqualConstant(const Value* other) const;
+    virtual TriState aboveConstant(const Value* other) const;
+    virtual TriState belowConstant(const Value* other) const;
+    virtual TriState aboveEqualConstant(const Value* other) const;
+    virtual TriState belowEqualConstant(const Value* other) const;
+    virtual TriState equalOrUnorderedConstant(const Value* other) const;
+    
+    // If the value is a comparison then this returns the inverted form of that comparison, if
+    // possible. It can be impossible for double comparisons, where for example LessThan and
+    // GreaterEqual behave differently. If this returns a value, it is a new value, which must be
+    // either inserted into some block or deleted.
+    Value* invertedCompare(Procedure&) const;
+
+    bool hasInt32() const;
+    int32_t asInt32() const;
+    bool isInt32(int32_t) const;
+    
+    bool hasInt64() const;
+    int64_t asInt64() const;
+    bool isInt64(int64_t) const;
+
+    bool hasInt() const;
+    int64_t asInt() const;
+    bool isInt(int64_t value) const;
+
+    bool hasIntPtr() const;
+    intptr_t asIntPtr() const;
+    bool isIntPtr(intptr_t) const;
+
+    bool hasDouble() const;
+    double asDouble() const;
+    bool isEqualToDouble(double) const; // We say "isEqualToDouble" because "isDouble" would imply bit equality.
+
+    bool hasFloat() const;
+    float asFloat() const;
+
+    bool hasNumber() const;
+    template<typename T> bool isRepresentableAs() const;
+    template<typename T> T asNumber() const;
+
+    // Booleans in B3 are Const32(0) or Const32(1). So this is true if the type is Int32 and the only
+    // possible return values are 0 or 1. It's OK for this method to conservatively return false.
+    bool returnsBool() const;
+
+    bool isNegativeZero() const;
+
+    bool isRounded() const;
+
+    TriState asTriState() const;
+    bool isLikeZero() const { return asTriState() == FalseTriState; }
+    bool isLikeNonZero() const { return asTriState() == TrueTriState; }
+
+    Effects effects() const;
+
+    // This returns a ValueKey that describes what this Value returns when it executes. Returns an
+    // empty ValueKey if this Value is impure. Note that an operation that returns Void could still
+    // have a non-empty ValueKey. This happens for example with Check operations.
+    ValueKey key() const;
+
+    // Makes sure that none of the children are Identity's. If a child points to Identity, this will
+    // repoint it at the Identity's child. For simplicity, this will follow arbitrarily long chains
+    // of Identity's.
+    void performSubstitution();
+    
+    // Free values are those whose presence is guaranteed not to hurt code. We consider constants,
+    // Identities, and Nops to be free. Constants are free because we hoist them to an optimal place.
+    // Identities and Nops are free because we remove them.
+    bool isFree() const;
+
+    // Walk the ancestors of this value (i.e. the graph of things it transitively uses). This
+    // either walks phis or not, depending on whether PhiChildren is null. Your callback gets
+    // called with the signature:
+    //
+    //     (Value*) -> WalkStatus
+    enum WalkStatus {
+        Continue,
+        IgnoreChildren,
+        Stop
+    };
+    template<typename Functor>
+    void walk(const Functor& functor, PhiChildren* = nullptr);
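+    //
+    // For example, to check whether any ancestor is a Load (a sketch, for illustration only):
+    //
+    //     bool sawLoad = false;
+    //     value->walk([&] (Value* ancestor) -> Value::WalkStatus {
+    //         if (ancestor->opcode() == Load) {
+    //             sawLoad = true;
+    //             return Value::Stop;
+    //         }
+    //         return Value::Continue;
+    //     });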
+
+protected:
+    virtual Value* cloneImpl() const;
+    
+    virtual void dumpChildren(CommaPrinter&, PrintStream&) const;
+    virtual void dumpMeta(CommaPrinter&, PrintStream&) const;
+
+private:
+    friend class Procedure;
+    friend class SparseCollection<Value>;
+
+    // Checks that this kind is valid for use with B3::Value.
+    ALWAYS_INLINE static void checkKind(Kind kind, unsigned numArgs)
+    {
+        switch (kind.opcode()) {
+        case FramePointer:
+        case Nop:
+        case Phi:
+        case Jump:
+        case Oops:
+        case EntrySwitch:
+            if (UNLIKELY(numArgs))
+                badKind(kind, numArgs);
+            break;
+        case Return:
+            if (UNLIKELY(numArgs > 1))
+                badKind(kind, numArgs);
+            break;
+        case Identity:
+        case Neg:
+        case Clz:
+        case Abs:
+        case Ceil:
+        case Floor:
+        case Sqrt:
+        case SExt8:
+        case SExt16:
+        case Trunc:
+        case SExt32:
+        case ZExt32:
+        case FloatToDouble:
+        case IToD:
+        case DoubleToFloat:
+        case IToF:
+        case BitwiseCast:
+        case Branch:
+            if (UNLIKELY(numArgs != 1))
+                badKind(kind, numArgs);
+            break;
+        case Add:
+        case Sub:
+        case Mul:
+        case Div:
+        case UDiv:
+        case Mod:
+        case UMod:
+        case BitAnd:
+        case BitOr:
+        case BitXor:
+        case Shl:
+        case SShr:
+        case ZShr:
+        case RotR:
+        case RotL:
+        case Equal:
+        case NotEqual:
+        case LessThan:
+        case GreaterThan:
+        case LessEqual:
+        case GreaterEqual:
+        case Above:
+        case Below:
+        case AboveEqual:
+        case BelowEqual:
+        case EqualOrUnordered:
+            if (UNLIKELY(numArgs != 2))
+                badKind(kind, numArgs);
+            break;
+        case Select:
+            if (UNLIKELY(numArgs != 3))
+                badKind(kind, numArgs);
+            break;
+        default:
+            badKind(kind, numArgs);
+            break;
+        }
+    }
+
+protected:
+    enum CheckedOpcodeTag { CheckedOpcode };
+
+    Value(const Value&) = default;
+    Value& operator=(const Value&) = default;
+    
+    // Instantiate values via Procedure.
+    // This form requires specifying the type explicitly:
+    template<typename... Arguments>
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, Value* firstChild, Arguments... arguments)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+        , m_children{ firstChild, arguments... }
+    {
+    }
+    // This form is for specifying the type explicitly when the opcode has no children:
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+    {
+    }
+    // This form is for those opcodes that can infer their type from the opcode and first child:
+    template<typename... Arguments>
+    explicit Value(CheckedOpcodeTag, Kind kind, Origin origin, Value* firstChild)
+        : m_kind(kind)
+        , m_type(typeFor(kind, firstChild))
+        , m_origin(origin)
+        , m_children{ firstChild }
+    {
+    }
+    // This form is for those opcodes that can infer their type from the opcode and first and second child:
+    template<typename... Arguments>
+    explicit Value(CheckedOpcodeTag, Kind kind, Origin origin, Value* firstChild, Value* secondChild, Arguments... arguments)
+        : m_kind(kind)
+        , m_type(typeFor(kind, firstChild, secondChild))
+        , m_origin(origin)
+        , m_children{ firstChild, secondChild, arguments... }
+    {
+    }
+    // This form is for those opcodes that can infer their type from the opcode alone, and that don't
+    // take any arguments:
+    explicit Value(CheckedOpcodeTag, Kind kind, Origin origin)
+        : m_kind(kind)
+        , m_type(typeFor(kind, nullptr))
+        , m_origin(origin)
+    {
+    }
+    // Use this form for varargs.
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, const AdjacencyList& children)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+        , m_children(children)
+    {
+    }
+    explicit Value(CheckedOpcodeTag, Kind kind, Type type, Origin origin, AdjacencyList&& children)
+        : m_kind(kind)
+        , m_type(type)
+        , m_origin(origin)
+        , m_children(WTFMove(children))
+    {
+    }
+
+    // This is the constructor you end up actually calling, if you're instantiating Value
+    // directly.
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin)
+        : Value(CheckedOpcode, kind, type, origin)
+    {
+        checkKind(kind, 0);
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin, Value* firstChild, Arguments&&... arguments)
+        : Value(CheckedOpcode, kind, type, origin, firstChild, std::forward<Arguments>(arguments)...)
+    {
+        checkKind(kind, 1 + sizeof...(arguments));
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin, const AdjacencyList& children)
+        : Value(CheckedOpcode, kind, type, origin, children)
+    {
+        checkKind(kind, children.size());
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Type type, Origin origin, AdjacencyList&& children)
+        : Value(CheckedOpcode, kind, type, origin, WTFMove(children))
+    {
+        checkKind(kind, m_children.size());
+    }
+    template<typename... Arguments>
+        explicit Value(Kind kind, Origin origin, Arguments&&... arguments)
+        : Value(CheckedOpcode, kind, origin, std::forward<Arguments>(arguments)...)
+    {
+        checkKind(kind, sizeof...(arguments));
+    }
+
+private:
+    friend class CheckValue; // CheckValue::convertToAdd() modifies m_kind.
+    
+    static Type typeFor(Kind, Value* firstChild, Value* secondChild = nullptr);
+
+    // This group of fields is arranged to fit in 64 bits.
+protected:
+    unsigned m_index { UINT_MAX };
+private:
+    Kind m_kind;
+    Type m_type;
+    
+    Origin m_origin;
+    AdjacencyList m_children;
+
+    JS_EXPORT_PRIVATE NO_RETURN_DUE_TO_CRASH static void badKind(Kind, unsigned);
+
+public:
+    BasicBlock* owner { nullptr }; // computed by Procedure::resetValueOwners().
+};
+
+class DeepValueDump {
+public:
+    DeepValueDump(const Procedure* proc, const Value* value)
+        : m_proc(proc)
+        , m_value(value)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_value)
+            m_value->deepDump(m_proc, out);
+        else
+            out.print("");
+    }
+
+private:
+    const Procedure* m_proc;
+    const Value* m_value;
+};
+
+inline DeepValueDump deepDump(const Procedure& proc, const Value* value)
+{
+    return DeepValueDump(&proc, value);
+}
+inline DeepValueDump deepDump(const Value* value)
+{
+    return DeepValueDump(nullptr, value);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ValueInlines.h b/b3/B3ValueInlines.h
new file mode 100644
index 0000000..57f93d6
--- /dev/null
+++ b/b3/B3ValueInlines.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3CheckValue.h"
+#include "B3Const32Value.h"
+#include "B3Const64Value.h"
+#include "B3ConstDoubleValue.h"
+#include "B3ConstFloatValue.h"
+#include "B3PatchpointValue.h"
+#include "B3PhiChildren.h"
+#include "B3Procedure.h"
+#include "B3Value.h"
+#include <wtf/GraphNodeWorklist.h>
+
+namespace JSC { namespace B3 {
+
+template<typename BottomProvider>
+void Value::replaceWithBottom(const BottomProvider& bottomProvider)
+{
+    if (m_type == Void) {
+        replaceWithNop();
+        return;
+    }
+    
+    if (isConstant())
+        return;
+    
+    replaceWithIdentity(bottomProvider(m_origin, m_type));
+}
+
+template<typename T>
+inline T* Value::as()
+{
+    if (T::accepts(kind()))
+        return static_cast(this);
+    return nullptr;
+}
+
+template<typename T>
+inline const T* Value::as() const
+{
+    return const_cast<Value*>(this)->as<T>();
+}
+
+inline bool Value::isConstant() const
+{
+    return B3::isConstant(opcode());
+}
+
+inline bool Value::isInteger() const
+{
+    return type() == Int32 || type() == Int64;
+}
+
+inline bool Value::hasInt32() const
+{
+    return !!as<Const32Value>();
+}
+
+inline int32_t Value::asInt32() const
+{
+    return as<Const32Value>()->value();
+}
+
+inline bool Value::isInt32(int32_t value) const
+{
+    return hasInt32() && asInt32() == value;
+}
+
+inline bool Value::hasInt64() const
+{
+    return !!as<Const64Value>();
+}
+
+inline int64_t Value::asInt64() const
+{
+    return as<Const64Value>()->value();
+}
+
+inline bool Value::isInt64(int64_t value) const
+{
+    return hasInt64() && asInt64() == value;
+}
+
+inline bool Value::hasInt() const
+{
+    return hasInt32() || hasInt64();
+}
+
+inline int64_t Value::asInt() const
+{
+    return hasInt32() ? asInt32() : asInt64();
+}
+
+inline bool Value::isInt(int64_t value) const
+{
+    return hasInt() && asInt() == value;
+}
+
+inline bool Value::hasIntPtr() const
+{
+    if (is64Bit())
+        return hasInt64();
+    return hasInt32();
+}
+
+inline intptr_t Value::asIntPtr() const
+{
+    if (is64Bit())
+        return asInt64();
+    return asInt32();
+}
+
+inline bool Value::isIntPtr(intptr_t value) const
+{
+    return hasIntPtr() && asIntPtr() == value;
+}
+
+inline bool Value::hasDouble() const
+{
+    return !!as<ConstDoubleValue>();
+}
+
+inline double Value::asDouble() const
+{
+    return as<ConstDoubleValue>()->value();
+}
+
+inline bool Value::isEqualToDouble(double value) const
+{
+    return hasDouble() && asDouble() == value;
+}
+
+inline bool Value::hasFloat() const
+{
+    return !!as<ConstFloatValue>();
+}
+
+inline float Value::asFloat() const
+{
+    return as<ConstFloatValue>()->value();
+}
+
+inline bool Value::hasNumber() const
+{
+    return hasInt() || hasDouble() || hasFloat();
+}
+
+inline bool Value::isNegativeZero() const
+{
+    if (hasDouble()) {
+        double value = asDouble();
+        return !value && std::signbit(value);
+    }
+    if (hasFloat()) {
+        float value = asFloat();
+        return !value && std::signbit(value);
+    }
+    return false;
+}
+
+template<typename T>
+inline bool Value::isRepresentableAs() const
+{
+    switch (opcode()) {
+    case Const32:
+        return B3::isRepresentableAs<T>(asInt32());
+    case Const64:
+        return B3::isRepresentableAs<T>(asInt64());
+    case ConstDouble:
+        return B3::isRepresentableAs<T>(asDouble());
+    case ConstFloat:
+        return B3::isRepresentableAs<T>(asFloat());
+    default:
+        return false;
+    }
+}
+
+template<typename T>
+inline T Value::asNumber() const
+{
+    switch (opcode()) {
+    case Const32:
+        return static_cast<T>(asInt32());
+    case Const64:
+        return static_cast<T>(asInt64());
+    case ConstDouble:
+        return static_cast<T>(asDouble());
+    case ConstFloat:
+        return static_cast<T>(asFloat());
+    default:
+        return T();
+    }
+}
+
+template<typename Functor>
+void Value::walk(const Functor& functor, PhiChildren* phiChildren)
+{
+    GraphNodeWorklist<Value*> worklist;
+    worklist.push(this);
+    while (Value* value = worklist.pop()) {
+        WalkStatus status = functor(value);
+        switch (status) {
+        case Continue:
+            if (value->opcode() == Phi) {
+                if (phiChildren)
+                    worklist.pushAll(phiChildren->at(value).values());
+            } else
+                worklist.pushAll(value->children());
+            break;
+        case IgnoreChildren:
+            break;
+        case Stop:
+            return;
+        }
+    }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ValueKey.cpp b/b3/B3ValueKey.cpp
new file mode 100644
index 0000000..10edff3
--- /dev/null
+++ b/b3/B3ValueKey.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ValueKey.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3ArgumentRegValue.h"
+#include "B3ProcedureInlines.h"
+#include "B3SlotBaseValue.h"
+#include "B3ValueInlines.h"
+#include "B3ValueKeyInlines.h"
+
+namespace JSC { namespace B3 {
+
+ValueKey ValueKey::intConstant(Type type, int64_t value)
+{
+    switch (type) {
+    case Int32:
+        return ValueKey(Const32, Int32, value);
+    case Int64:
+        return ValueKey(Const64, Int64, value);
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return ValueKey();
+    }
+}
+
+void ValueKey::dump(PrintStream& out) const
+{
+    out.print(m_type, " ", m_kind, "(", u.indices[0], ", ", u.indices[1], ", ", u.indices[2], ")");
+}
+
+Value* ValueKey::materialize(Procedure& proc, Origin origin) const
+{
+    switch (opcode()) {
+    case FramePointer:
+        return proc.add<Value>(kind(), type(), origin);
+    case Identity:
+    case Sqrt:
+    case SExt8:
+    case SExt16:
+    case SExt32:
+    case ZExt32:
+    case Clz:
+    case Trunc:
+    case IToD:
+    case IToF:
+    case FloatToDouble:
+    case DoubleToFloat:
+    case Check:
+        return proc.add<Value>(kind(), type(), origin, child(proc, 0));
+    case Add:
+    case Sub:
+    case Mul:
+    case Div:
+    case UDiv:
+    case Mod:
+    case UMod:
+    case BitAnd:
+    case BitOr:
+    case BitXor:
+    case Shl:
+    case SShr:
+    case ZShr:
+    case RotR:
+    case RotL:
+    case Equal:
+    case NotEqual:
+    case LessThan:
+    case GreaterThan:
+    case Above:
+    case Below:
+    case AboveEqual:
+    case BelowEqual:
+        return proc.add<Value>(kind(), type(), origin, child(proc, 0), child(proc, 1));
+    case Select:
+        return proc.add<Value>(kind(), type(), origin, child(proc, 0), child(proc, 1), child(proc, 2));
+    case Const32:
+        return proc.add<Const32Value>(origin, static_cast<int32_t>(value()));
+    case Const64:
+        return proc.add<Const64Value>(origin, value());
+    case ConstDouble:
+        return proc.add<ConstDoubleValue>(origin, doubleValue());
+    case ConstFloat:
+        return proc.add<ConstFloatValue>(origin, floatValue());
+    case ArgumentReg:
+        return proc.add<ArgumentRegValue>(origin, Reg::fromIndex(static_cast<unsigned>(value())));
+    case SlotBase:
+        return proc.add<SlotBaseValue>(origin, proc.stackSlots()[value()]);
+    default:
+        return nullptr;
+    }
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3ValueKey.h b/b3/B3ValueKey.h
new file mode 100644
index 0000000..18b092c
--- /dev/null
+++ b/b3/B3ValueKey.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3HeapRange.h"
+#include "B3Kind.h"
+#include "B3Origin.h"
+#include "B3Type.h"
+#include <wtf/HashTable.h>
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+class Value;
+
+// ValueKeys are useful for CSE. They abstractly describe the value that a Value returns when it
+// executes. Any Value that has the same ValueKey is guaranteed to return the same value, provided
+// that they return a non-empty ValueKey. Operations that have effects, or that can have their
+// behavior affected by other operations' effects, will return an empty ValueKey. You have to use
+// other mechanisms for doing CSE for impure operations.
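+//
+// A sketch of how a simple CSE-style pass might use this (the HashMap and the surrounding loop are
+// illustrative assumptions, not part of this header):
+//
+//     HashMap<ValueKey, Value*> pureValues;
+//     ValueKey key = value->key();
+//     if (key) {
+//         auto addResult = pureValues.add(key, value);
+//         if (!addResult.isNewEntry)
+//             value->replaceWithIdentity(addResult.iterator->value);
+//     }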
+
+class ValueKey {
+public:
+    ValueKey()
+    {
+    }
+
+    ValueKey(Kind kind, Type type)
+        : m_kind(kind)
+        , m_type(type)
+    {
+    }
+
+    ValueKey(Kind, Type, Value* child);
+
+    ValueKey(Kind, Type, Value* left, Value* right);
+
+    ValueKey(Kind, Type, Value* a, Value* b, Value* c);
+
+    ValueKey(Kind kind, Type type, int64_t value)
+        : m_kind(kind)
+        , m_type(type)
+    {
+        u.value = value;
+    }
+
+    ValueKey(Kind kind, Type type, double value)
+        : m_kind(kind)
+        , m_type(type)
+    {
+        u.doubleValue = value;
+    }
+
+    ValueKey(Kind kind, Type type, float value)
+        : m_kind(kind)
+        , m_type(type)
+    {
+        u.floatValue = value;
+    }
+
+    static ValueKey intConstant(Type type, int64_t value);
+
+    Kind kind() const { return m_kind; }
+    Opcode opcode() const { return kind().opcode(); }
+    Type type() const { return m_type; }
+    unsigned childIndex(unsigned index) const { return u.indices[index]; }
+    Value* child(Procedure&, unsigned index) const;
+    int64_t value() const { return u.value; }
+    double doubleValue() const { return u.doubleValue; }
+    double floatValue() const { return u.floatValue; }
+
+    bool operator==(const ValueKey& other) const
+    {
+        return m_kind == other.m_kind
+            && m_type == other.m_type
+            && u == other.u;
+    }
+
+    bool operator!=(const ValueKey& other) const
+    {
+        return !(*this == other);
+    }
+
+    unsigned hash() const
+    {
+        return m_kind.hash() + m_type + WTF::IntHash<int32_t>::hash(u.indices[0]) + u.indices[1] + u.indices[2];
+    }
+
+    explicit operator bool() const { return *this != ValueKey(); }
+
+    void dump(PrintStream&) const;
+
+    bool canMaterialize() const
+    {
+        if (!*this)
+            return false;
+        switch (opcode()) {
+        case CheckAdd:
+        case CheckSub:
+        case CheckMul:
+            return false;
+        default:
+            return true;
+        }
+    }
+
+    bool isConstant() const
+    {
+        return B3::isConstant(opcode());
+    }
+
+    // Attempts to materialize the Value for this ValueKey. May return nullptr if the value cannot
+    // be materialized. This happens for CheckAdd and friends. You can use canMaterialize() to check
+    // if your key is materializable.
+    Value* materialize(Procedure&, Origin) const;
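+    //
+    // For example (sketch; proc and origin come from the calling pass):
+    //
+    //     if (key.canMaterialize()) {
+    //         Value* rematerialized = key.materialize(proc, origin);
+    //         // ... insert rematerialized into a block before using it.
+    //     }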
+
+    ValueKey(WTF::HashTableDeletedValueType)
+        : m_type { Int32 }
+    {
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return *this == ValueKey(WTF::HashTableDeletedValue);
+    }
+        
+private:
+    Kind m_kind;
+    Type m_type { Void };
+    union U {
+        unsigned indices[3];
+        int64_t value;
+        double doubleValue;
+        float floatValue;
+
+        U()
+        {
+            indices[0] = 0;
+            indices[1] = 0;
+            indices[2] = 0;
+        }
+
+        bool operator==(const U& other) const
+        {
+            return indices[0] == other.indices[0]
+                && indices[1] == other.indices[1]
+                && indices[2] == other.indices[2];
+        }
+    } u;
+};
+
+struct ValueKeyHash {
+    static unsigned hash(const ValueKey& key) { return key.hash(); }
+    static bool equal(const ValueKey& a, const ValueKey& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::ValueKey> {
+    typedef JSC::B3::ValueKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::ValueKey> : public SimpleClassHashTraits<JSC::B3::ValueKey> {
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ValueKeyInlines.h b/b3/B3ValueKeyInlines.h
new file mode 100644
index 0000000..14158d5
--- /dev/null
+++ b/b3/B3ValueKeyInlines.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Procedure.h"
+#include "B3Value.h"
+#include "B3ValueKey.h"
+
+namespace JSC { namespace B3 {
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* child)
+    : m_kind(kind)
+    , m_type(type)
+{
+    u.indices[0] = child->index();
+}
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* left, Value* right)
+    : m_kind(kind)
+    , m_type(type)
+{
+    u.indices[0] = left->index();
+    u.indices[1] = right->index();
+}
+
+inline ValueKey::ValueKey(Kind kind, Type type, Value* a, Value* b, Value* c)
+    : m_kind(kind)
+    , m_type(type)
+{
+    u.indices[0] = a->index();
+    u.indices[1] = b->index();
+    u.indices[2] = c->index();
+}
+
+inline Value* ValueKey::child(Procedure& proc, unsigned index) const
+{
+    return proc.values()[index];
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ValueRep.cpp b/b3/B3ValueRep.cpp
new file mode 100644
index 0000000..9888d22
--- /dev/null
+++ b/b3/B3ValueRep.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3ValueRep.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AssemblyHelpers.h"
+#include "JSCInlines.h"
+
+namespace JSC { namespace B3 {
+
+void ValueRep::addUsedRegistersTo(RegisterSet& set) const
+{
+    switch (m_kind) {
+    case WarmAny:
+    case ColdAny:
+    case LateColdAny:
+    case SomeRegister:
+    case SomeEarlyRegister:
+    case Constant:
+        return;
+    case LateRegister:
+    case Register:
+        set.set(reg());
+        return;
+    case Stack:
+    case StackArgument:
+        set.set(MacroAssembler::stackPointerRegister);
+        set.set(GPRInfo::callFrameRegister);
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+RegisterSet ValueRep::usedRegisters() const
+{
+    RegisterSet result;
+    addUsedRegistersTo(result);
+    return result;
+}
+
+void ValueRep::dump(PrintStream& out) const
+{
+    out.print(m_kind);
+    switch (m_kind) {
+    case WarmAny:
+    case ColdAny:
+    case LateColdAny:
+    case SomeRegister:
+    case SomeEarlyRegister:
+        return;
+    case LateRegister:
+    case Register:
+        out.print("(", reg(), ")");
+        return;
+    case Stack:
+        out.print("(", offsetFromFP(), ")");
+        return;
+    case StackArgument:
+        out.print("(", offsetFromSP(), ")");
+        return;
+    case Constant:
+        out.print("(", value(), ")");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void ValueRep::emitRestore(AssemblyHelpers& jit, Reg reg) const
+{
+    if (reg.isGPR()) {
+        switch (kind()) {
+        case LateRegister:
+        case Register:
+            if (isGPR())
+                jit.move(gpr(), reg.gpr());
+            else
+                jit.moveDoubleTo64(fpr(), reg.gpr());
+            break;
+        case Stack:
+            jit.load64(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.gpr());
+            break;
+        case Constant:
+            jit.move(AssemblyHelpers::TrustedImm64(value()), reg.gpr());
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+        return;
+    }
+    
+    switch (kind()) {
+    case LateRegister:
+    case Register:
+        if (isGPR())
+            jit.move64ToDouble(gpr(), reg.fpr());
+        else
+            jit.moveDouble(fpr(), reg.fpr());
+        break;
+    case Stack:
+        jit.loadDouble(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.fpr());
+        break;
+    case Constant:
+        jit.move(AssemblyHelpers::TrustedImm64(value()), jit.scratchRegister());
+        jit.move64ToDouble(jit.scratchRegister(), reg.fpr());
+        break;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+}
+
+ValueRecovery ValueRep::recoveryForJSValue() const
+{
+    switch (kind()) {
+    case LateRegister:
+    case Register:
+        return ValueRecovery::inGPR(gpr(), DataFormatJS);
+    case Stack:
+        RELEASE_ASSERT(!(offsetFromFP() % sizeof(EncodedJSValue)));
+        return ValueRecovery::displacedInJSStack(
+            VirtualRegister(offsetFromFP() / sizeof(EncodedJSValue)),
+            DataFormatJS);
+    case Constant:
+        return ValueRecovery::constant(JSValue::decode(value()));
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return { };
+    }
+}
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+using namespace JSC::B3;
+
+void printInternal(PrintStream& out, ValueRep::Kind kind)
+{
+    switch (kind) {
+    case ValueRep::WarmAny:
+        out.print("WarmAny");
+        return;
+    case ValueRep::ColdAny:
+        out.print("ColdAny");
+        return;
+    case ValueRep::LateColdAny:
+        out.print("LateColdAny");
+        return;
+    case ValueRep::SomeRegister:
+        out.print("SomeRegister");
+        return;
+    case ValueRep::SomeEarlyRegister:
+        out.print("SomeEarlyRegister");
+        return;
+    case ValueRep::Register:
+        out.print("Register");
+        return;
+    case ValueRep::LateRegister:
+        out.print("LateRegister");
+        return;
+    case ValueRep::Stack:
+        out.print("Stack");
+        return;
+    case ValueRep::StackArgument:
+        out.print("StackArgument");
+        return;
+    case ValueRep::Constant:
+        out.print("Constant");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3ValueRep.h b/b3/B3ValueRep.h
new file mode 100644
index 0000000..5f9635e
--- /dev/null
+++ b/b3/B3ValueRep.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+#include "Reg.h"
+#include "RegisterSet.h"
+#include "ValueRecovery.h"
+#include 
+
+namespace JSC {
+
+class AssemblyHelpers;
+
+namespace B3 {
+
+// We use this class to describe value representations at stackmaps. It's used both to force a
+// representation and to get the representation. When the B3 client forces a representation, we say
+// that it's an input. When B3 tells the client what representation it picked, we say that it's an
+// output.
+
+class ValueRep {
+public:
+    enum Kind {
+        // As an input representation, this means that B3 can pick any representation. As an output
+        // representation, this means that we don't know. This will only arise as an output
+        // representation for the active arguments of Check/CheckAdd/CheckSub/CheckMul.
+        WarmAny,
+
+        // Same as WarmAny, but implies that the use is cold. A cold use is not counted as a use for
+        // computing the priority of the used temporary.
+        ColdAny,
+
+        // Same as ColdAny, but also implies that the use occurs after all other effects of the stackmap
+        // value.
+        LateColdAny,
+
+        // As an input representation, this means that B3 should pick some register. It could be a
+        // register that this claims to clobber!
+        SomeRegister,
+
+        // As an input representation, this tells us that B3 should pick some register, but implies
+        // that the def happens before any of the effects of the stackmap. This is only valid for
+        // the result constraint of a Patchpoint.
+        SomeEarlyRegister,
+
+        // As an input representation, this forces a particular register. As an output
+        // representation, this tells us what register B3 picked.
+        Register,
+
+        // As an input representation, this forces a particular register and states that
+        // the register is used late. This means that the register is used after the result
+        // is defined (i.e., the result will interfere with this as an input).
+        // It's not a valid output representation.
+        LateRegister,
+
+        // As an output representation, this tells us what stack slot B3 picked. It's not a valid
+        // input representation.
+        Stack,
+
+        // As an input representation, this forces the value to end up in the argument area at some
+        // offset.
+        StackArgument,
+
+        // As an output representation, this tells us that B3 constant-folded the value.
+        Constant
+    };
+    
+    ValueRep()
+        : m_kind(WarmAny)
+    {
+    }
+
+    explicit ValueRep(Reg reg)
+        : m_kind(Register)
+    {
+        u.reg = reg;
+    }
+
+    ValueRep(Kind kind)
+        : m_kind(kind)
+    {
+        ASSERT(kind == WarmAny || kind == ColdAny || kind == LateColdAny || kind == SomeRegister || kind == SomeEarlyRegister);
+    }
+
+    static ValueRep reg(Reg reg)
+    {
+        return ValueRep(reg);
+    }
+
+    static ValueRep lateReg(Reg reg)
+    {
+        ValueRep result(reg);
+        result.m_kind = LateRegister;
+        return result;
+    }
+
+    static ValueRep stack(intptr_t offsetFromFP)
+    {
+        ValueRep result;
+        result.m_kind = Stack;
+        result.u.offsetFromFP = offsetFromFP;
+        return result;
+    }
+
+    static ValueRep stackArgument(intptr_t offsetFromSP)
+    {
+        ValueRep result;
+        result.m_kind = StackArgument;
+        result.u.offsetFromSP = offsetFromSP;
+        return result;
+    }
+
+    static ValueRep constant(int64_t value)
+    {
+        ValueRep result;
+        result.m_kind = Constant;
+        result.u.value = value;
+        return result;
+    }
+
+    static ValueRep constantDouble(double value)
+    {
+        return ValueRep::constant(bitwise_cast<int64_t>(value));
+    }
+
+    Kind kind() const { return m_kind; }
+
+    bool operator==(const ValueRep& other) const
+    {
+        if (kind() != other.kind())
+            return false;
+        switch (kind()) {
+        case LateRegister:
+        case Register:
+            return u.reg == other.u.reg;
+        case Stack:
+            return u.offsetFromFP == other.u.offsetFromFP;
+        case StackArgument:
+            return u.offsetFromSP == other.u.offsetFromSP;
+        case Constant:
+            return u.value == other.u.value;
+        default:
+            return true;
+        }
+    }
+
+    bool operator!=(const ValueRep& other) const
+    {
+        return !(*this == other);
+    }
+
+    explicit operator bool() const { return kind() != WarmAny; }
+
+    bool isAny() const { return kind() == WarmAny || kind() == ColdAny || kind() == LateColdAny; }
+
+    bool isReg() const { return kind() == Register || kind() == LateRegister; }
+    
+    Reg reg() const
+    {
+        ASSERT(isReg());
+        return u.reg;
+    }
+
+    bool isGPR() const { return isReg() && reg().isGPR(); }
+    bool isFPR() const { return isReg() && reg().isFPR(); }
+
+    GPRReg gpr() const { return reg().gpr(); }
+    FPRReg fpr() const { return reg().fpr(); }
+
+    bool isStack() const { return kind() == Stack; }
+
+    intptr_t offsetFromFP() const
+    {
+        ASSERT(isStack());
+        return u.offsetFromFP;
+    }
+
+    bool isStackArgument() const { return kind() == StackArgument; }
+
+    intptr_t offsetFromSP() const
+    {
+        ASSERT(isStackArgument());
+        return u.offsetFromSP;
+    }
+
+    bool isConstant() const { return kind() == Constant; }
+
+    int64_t value() const
+    {
+        ASSERT(isConstant());
+        return u.value;
+    }
+
+    double doubleValue() const
+    {
+        return bitwise_cast<double>(value());
+    }
+
+    ValueRep withOffset(intptr_t offset) const
+    {
+        switch (kind()) {
+        case Stack:
+            return stack(offsetFromFP() + offset);
+        case StackArgument:
+            return stackArgument(offsetFromSP() + offset);
+        default:
+            return *this;
+        }
+    }
+
+    void addUsedRegistersTo(RegisterSet&) const;
+    
+    RegisterSet usedRegisters() const;
+
+    // Get the used registers for a vector of ValueReps.
+    template<typename VectorType>
+    static RegisterSet usedRegisters(const VectorType& vector)
+    {
+        RegisterSet result;
+        for (const ValueRep& value : vector)
+            value.addUsedRegistersTo(result);
+        return result;
+    }
+
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    // This has a simple contract: it emits code to restore the value into the given register. This
+    // will work even if it requires moving bits between a GPR and an FPR.
+    void emitRestore(AssemblyHelpers&, Reg) const;
+
+    // Computes the ValueRecovery assuming that the Value* was for a JSValue (i.e. Int64).
+    // NOTE: We should avoid putting JSValue-related methods in B3, but this was hard to avoid
+    // because some parts of JSC use ValueRecovery like a general "where my bits at" object, almost
+    // exactly like ValueRep.
+    ValueRecovery recoveryForJSValue() const;
+
+private:
+    Kind m_kind;
+    union U {
+        Reg reg;
+        intptr_t offsetFromFP;
+        intptr_t offsetFromSP;
+        int64_t value;
+
+        U()
+        {
+            memset(this, 0, sizeof(*this));
+        }
+    } u;
+};
+
+} } // namespace JSC::B3
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::B3::ValueRep::Kind);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
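
As a quick orientation for the constraint kinds documented above, here is a hedged sketch (not part of the patch) of how a client might build ValueReps; `GPRInfo::regT0` is just an example register, and the surrounding stackmap machinery is assumed context.

    ValueRep anyWarm = ValueRep::WarmAny;              // let B3 pick; counted as a warm use
    ValueRep pinned  = ValueRep::reg(GPRInfo::regT0);  // force a particular register
    ValueRep inArgs  = ValueRep::stackArgument(0);     // force into the outgoing argument area
    ValueRep folded  = ValueRep::constant(42);         // what B3 reports after constant folding

    Vector<ValueRep> reps;
    reps.append(pinned);
    reps.append(inArgs);
    RegisterSet used = ValueRep::usedRegisters(reps);  // regT0, plus SP/FP for the stack rep
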
diff --git a/b3/B3Variable.cpp b/b3/B3Variable.cpp
new file mode 100644
index 0000000..2314ee2
--- /dev/null
+++ b/b3/B3Variable.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3Variable.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+Variable::~Variable()
+{
+}
+
+void Variable::dump(PrintStream& out) const
+{
+    out.print("var", m_index);
+}
+
+void Variable::deepDump(PrintStream& out) const
+{
+    out.print(m_type, " var", m_index);
+}
+
+Variable::Variable(Type type)
+    : m_type(type)
+{
+    ASSERT(type != Void);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3Variable.h b/b3/B3Variable.h
new file mode 100644
index 0000000..f4d610f
--- /dev/null
+++ b/b3/B3Variable.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3SparseCollection.h"
+#include "B3Type.h"
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+class Variable {
+    WTF_MAKE_NONCOPYABLE(Variable);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    ~Variable();
+
+    Type type() const { return m_type; }
+    unsigned index() const { return m_index; }
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+private:
+    friend class Procedure;
+    friend class SparseCollection<Variable>;
+
+    Variable(Type);
+    
+    unsigned m_index;
+    Type m_type;
+};
+
+class DeepVariableDump {
+public:
+    DeepVariableDump(const Variable* variable)
+        : m_variable(variable)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_variable)
+            m_variable->deepDump(out);
+        else
+            out.print("<null>");
+    }
+
+private:
+    const Variable* m_variable;
+};
+
+inline DeepVariableDump deepDump(const Variable* variable)
+{
+    return DeepVariableDump(variable);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3VariableValue.cpp b/b3/B3VariableValue.cpp
new file mode 100644
index 0000000..6aeef47
--- /dev/null
+++ b/b3/B3VariableValue.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "B3VariableValue.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3Variable.h"
+
+namespace JSC { namespace B3 {
+
+VariableValue::~VariableValue()
+{
+}
+
+void VariableValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, pointerDump(m_variable));
+}
+
+Value* VariableValue::cloneImpl() const
+{
+    return new VariableValue(*this);
+}
+
+VariableValue::VariableValue(Kind kind, Origin origin, Variable* variable, Value* value)
+    : Value(CheckedOpcode, kind, Void, origin, value)
+    , m_variable(variable)
+{
+    ASSERT(kind == Set);
+}
+
+VariableValue::VariableValue(Kind kind, Origin origin, Variable* variable)
+    : Value(CheckedOpcode, kind, variable->type(), origin)
+    , m_variable(variable)
+{
+    ASSERT(kind == Get);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3VariableValue.h b/b3/B3VariableValue.h
new file mode 100644
index 0000000..067ba42
--- /dev/null
+++ b/b3/B3VariableValue.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class Variable;
+
+class JS_EXPORT_PRIVATE VariableValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == Get || kind == Set; }
+
+    ~VariableValue();
+
+    Variable* variable() const { return m_variable; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    // Use this for Set.
+    VariableValue(Kind, Origin, Variable*, Value*);
+
+    // Use this for Get.
+    VariableValue(Kind, Origin, Variable*);
+
+    Variable* m_variable;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
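
The constructors above are private and reached through Procedure, so a hedged usage sketch (assuming Procedure::addVariable() and BasicBlock::appendNew<>() from the rest of B3; `proc`, `block`, `origin`, and `initialValue` are context) looks like this:

    Variable* counter = proc.addVariable(Int32);

    // Set: stores initialValue into the variable; the resulting Value has type Void.
    block->appendNew<VariableValue>(proc, Set, origin, counter, initialValue);

    // Get: reads the variable back; the resulting Value has the variable's type (Int32 here).
    Value* current = block->appendNew<VariableValue>(proc, Get, origin, counter);
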
diff --git a/b3/B3WasmAddressValue.cpp b/b3/B3WasmAddressValue.cpp
new file mode 100644
index 0000000..57d7628
--- /dev/null
+++ b/b3/B3WasmAddressValue.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3WasmAddressValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+WasmAddressValue::~WasmAddressValue()
+{
+}
+
+void WasmAddressValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, m_pinnedGPR);
+}
+
+Value* WasmAddressValue::cloneImpl() const
+{
+    return new WasmAddressValue(*this);
+}
+
+WasmAddressValue::WasmAddressValue(Origin origin, Value* value, GPRReg pinnedGPR)
+    : Value(CheckedOpcode, WasmAddress, Int64, origin, value)
+    , m_pinnedGPR(pinnedGPR)
+{
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/B3WasmAddressValue.h b/b3/B3WasmAddressValue.h
new file mode 100644
index 0000000..d938602
--- /dev/null
+++ b/b3/B3WasmAddressValue.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+
+namespace JSC { namespace B3 {
+
+class JS_EXPORT_PRIVATE WasmAddressValue : public Value {
+public:
+    static bool accepts(Kind kind) { return kind == WasmAddress; }
+
+    ~WasmAddressValue();
+
+    GPRReg pinnedGPR() const { return m_pinnedGPR; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    WasmAddressValue(Origin, Value*, GPRReg);
+
+    GPRReg m_pinnedGPR;
+};
+
+} } // namespace JSC::B3
+
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3WasmBoundsCheckValue.cpp b/b3/B3WasmBoundsCheckValue.cpp
new file mode 100644
index 0000000..b3a3290
--- /dev/null
+++ b/b3/B3WasmBoundsCheckValue.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "B3WasmBoundsCheckValue.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 {
+
+WasmBoundsCheckValue::~WasmBoundsCheckValue()
+{
+}
+
+WasmBoundsCheckValue::WasmBoundsCheckValue(Origin origin, Value* ptr, GPRReg pinnedGPR, unsigned offset)
+    : Value(CheckedOpcode, WasmBoundsCheck, origin, ptr)
+    , m_pinnedGPR(pinnedGPR)
+    , m_offset(offset)
+{
+}
+
+Value* WasmBoundsCheckValue::cloneImpl() const
+{
+    return new WasmBoundsCheckValue(*this);
+}
+
+void WasmBoundsCheckValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const
+{
+    out.print(comma, "sizeRegister = ", m_pinnedGPR, ", offset = ", m_offset);
+}
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/B3WasmBoundsCheckValue.h b/b3/B3WasmBoundsCheckValue.h
new file mode 100644
index 0000000..ccc54b8
--- /dev/null
+++ b/b3/B3WasmBoundsCheckValue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3Value.h"
+#include "CCallHelpers.h"
+
+namespace JSC { namespace B3 {
+
+class WasmBoundsCheckValue : public Value {
+public:
+    static bool accepts(Kind kind)
+    {
+        switch (kind.opcode()) {
+        case WasmBoundsCheck:
+            return true;
+        default:
+            return false;
+        }
+    }
+    
+    ~WasmBoundsCheckValue();
+
+    GPRReg pinnedGPR() const { return m_pinnedGPR; }
+    unsigned offset() const { return m_offset; }
+
+protected:
+    void dumpMeta(CommaPrinter&, PrintStream&) const override;
+
+    Value* cloneImpl() const override;
+
+private:
+    friend class Procedure;
+
+    JS_EXPORT_PRIVATE WasmBoundsCheckValue(Origin, Value* ptr, GPRReg pinnedGPR, unsigned offset);
+
+    GPRReg m_pinnedGPR;
+    unsigned m_offset;
+};
+
+} } // namespace JSC::B3
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirAllocateStack.cpp b/b3/air/AirAllocateStack.cpp
new file mode 100644
index 0000000..de9297f
--- /dev/null
+++ b/b3/air/AirAllocateStack.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirAllocateStack.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+#include "StackAlignment.h"
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+const bool verbose = false;
+
+bool attemptAssignment(
+    StackSlot* slot, intptr_t offsetFromFP, const Vector<StackSlot*>& otherSlots)
+{
+    if (verbose)
+        dataLog("Attempting to assign ", pointerDump(slot), " to ", offsetFromFP, " with interference ", pointerListDump(otherSlots), "\n");
+
+    // Need to align it to the slot's desired alignment.
+    offsetFromFP = -WTF::roundUpToMultipleOf(slot->alignment(), -offsetFromFP);
+    
+    for (StackSlot* otherSlot : otherSlots) {
+        if (!otherSlot->offsetFromFP())
+            continue;
+        bool overlap = WTF::rangesOverlap(
+            offsetFromFP,
+            offsetFromFP + static_cast<intptr_t>(slot->byteSize()),
+            otherSlot->offsetFromFP(),
+            otherSlot->offsetFromFP() + static_cast<intptr_t>(otherSlot->byteSize()));
+        if (overlap)
+            return false;
+    }
+
+    if (verbose)
+        dataLog("Assigned ", pointerDump(slot), " to ", offsetFromFP, "\n");
+    slot->setOffsetFromFP(offsetFromFP);
+    return true;
+}
+
+void assign(StackSlot* slot, const Vector<StackSlot*>& otherSlots)
+{
+    if (verbose)
+        dataLog("Attempting to assign ", pointerDump(slot), " with interference ", pointerListDump(otherSlots), "\n");
+    
+    if (attemptAssignment(slot, -static_cast<intptr_t>(slot->byteSize()), otherSlots))
+        return;
+
+    for (StackSlot* otherSlot : otherSlots) {
+        if (!otherSlot->offsetFromFP())
+            continue;
+        bool didAssign = attemptAssignment(
+            slot, otherSlot->offsetFromFP() - static_cast<intptr_t>(slot->byteSize()), otherSlots);
+        if (didAssign)
+            return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // anonymous namespace
+
+void allocateStack(Code& code)
+{
+    PhaseScope phaseScope(code, "allocateStack");
+
+    // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
+    // the possibility of stack slots being assigned frame offsets before we even get here.
+    ASSERT(!code.frameSize());
+    Vector<StackSlot*> assignedEscapedStackSlots;
+    Vector<StackSlot*> escapedStackSlotsWorklist;
+    for (StackSlot* slot : code.stackSlots()) {
+        if (slot->isLocked()) {
+            if (slot->offsetFromFP())
+                assignedEscapedStackSlots.append(slot);
+            else
+                escapedStackSlotsWorklist.append(slot);
+        } else {
+            // It would be super strange to have an unlocked stack slot that has an offset already.
+            ASSERT(!slot->offsetFromFP());
+        }
+    }
+    // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
+    // escaped stack slots.
+    while (!escapedStackSlotsWorklist.isEmpty()) {
+        StackSlot* slot = escapedStackSlotsWorklist.takeLast();
+        assign(slot, assignedEscapedStackSlots);
+        assignedEscapedStackSlots.append(slot);
+    }
+
+    // Now we handle the spill slots.
+    StackSlotLiveness liveness(code);
+    IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
+    Vector<StackSlot*> slots;
+
+    for (BasicBlock* block : code) {
+        StackSlotLiveness::LocalCalc localCalc(liveness, block);
+
+        auto interfere = [&] (unsigned instIndex) {
+            if (verbose)
+                dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");
+
+            Inst::forEachDef<Arg>(
+                block->get(instIndex), block->get(instIndex + 1),
+                [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                    if (!arg.isStack())
+                        return;
+                    StackSlot* slot = arg.stackSlot();
+                    if (slot->kind() != StackSlotKind::Spill)
+                        return;
+
+                    for (StackSlot* otherSlot : localCalc.live()) {
+                        interference[slot].add(otherSlot);
+                        interference[otherSlot].add(slot);
+                    }
+                });
+        };
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            if (verbose)
+                dataLog("Analyzing: ", block->at(instIndex), "\n");
+
+            // Kill dead stores. For simplicity we say that a store is killable if it has only late
+            // defs and those late defs are to things that are dead right now. We only do that
+            // because that's the only kind of dead stack store we will see here.
+            Inst& inst = block->at(instIndex);
+            if (!inst.hasNonArgEffects()) {
+                bool ok = true;
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                        if (Arg::isEarlyDef(role)) {
+                            ok = false;
+                            return;
+                        }
+                        if (!Arg::isLateDef(role))
+                            return;
+                        if (!arg.isStack()) {
+                            ok = false;
+                            return;
+                        }
+                        StackSlot* slot = arg.stackSlot();
+                        if (slot->kind() != StackSlotKind::Spill) {
+                            ok = false;
+                            return;
+                        }
+
+                        if (localCalc.isLive(slot)) {
+                            ok = false;
+                            return;
+                        }
+                    });
+                if (ok)
+                    inst = Inst();
+            }
+            
+            interfere(instIndex);
+            localCalc.execute(instIndex);
+        }
+        interfere(-1);
+        
+        block->insts().removeAllMatching(
+            [&] (const Inst& inst) -> bool {
+                return !inst;
+            });
+    }
+
+    if (verbose) {
+        for (StackSlot* slot : code.stackSlots())
+            dataLog("Interference of ", pointerDump(slot), ": ", pointerListDump(interference[slot]), "\n");
+    }
+
+    // Now we assign stack locations. At its heart this algorithm is just first-fit. For each
+    // StackSlot we just want to find the offsetFromFP that is closest to zero while ensuring no
+    // overlap with other StackSlots that this overlaps with.
+    Vector<StackSlot*> otherSlots = assignedEscapedStackSlots;
+    for (StackSlot* slot : code.stackSlots()) {
+        if (slot->offsetFromFP()) {
+            // Already assigned an offset.
+            continue;
+        }
+
+        HashSet<StackSlot*>& interferingSlots = interference[slot];
+        otherSlots.resize(assignedEscapedStackSlots.size());
+        otherSlots.resize(assignedEscapedStackSlots.size() + interferingSlots.size());
+        unsigned nextIndex = assignedEscapedStackSlots.size();
+        for (StackSlot* otherSlot : interferingSlots)
+            otherSlots[nextIndex++] = otherSlot;
+
+        assign(slot, otherSlots);
+    }
+
+    // Figure out how much stack we're using for stack slots.
+    unsigned frameSizeForStackSlots = 0;
+    for (StackSlot* slot : code.stackSlots()) {
+        frameSizeForStackSlots = std::max(
+            frameSizeForStackSlots,
+            static_cast<unsigned>(-slot->offsetFromFP()));
+    }
+
+    frameSizeForStackSlots = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSizeForStackSlots);
+
+    // Now we need to deduce how much argument area we need.
+    for (BasicBlock* block : code) {
+        for (Inst& inst : *block) {
+            for (Arg& arg : inst.args) {
+                if (arg.isCallArg()) {
+                    // For now, we assume that we use 8 bytes of the call arg. But that's not
+                    // such an awesome assumption.
+                    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150454
+                    ASSERT(arg.offset() >= 0);
+                    code.requestCallArgAreaSizeInBytes(arg.offset() + 8);
+                }
+            }
+        }
+    }
+
+    code.setFrameSize(frameSizeForStackSlots + code.callArgAreaSizeInBytes());
+
+    // Finally, transform the code to use Addr's instead of StackSlot's. This is a lossless
+    // transformation since we can search the StackSlots array to figure out which StackSlot any
+    // offset-from-FP refers to.
+
+    // FIXME: This may produce addresses that aren't valid if we end up with a ginormous stack frame.
+    // We would have to scavenge for temporaries if this happened. Fortunately, this case will be
+    // extremely rare so we can do crazy things when it arises.
+    // https://bugs.webkit.org/show_bug.cgi?id=152530
+
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+            inst.forEachArg(
+                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
+                    auto stackAddr = [&] (int32_t offset) -> Arg {
+                        return Arg::stackAddr(offset, code.frameSize(), width);
+                    };
+                    
+                    switch (arg.kind()) {
+                    case Arg::Stack: {
+                        StackSlot* slot = arg.stackSlot();
+                        if (Arg::isZDef(role)
+                            && slot->kind() == StackSlotKind::Spill
+                            && slot->byteSize() > Arg::bytes(width)) {
+                            // Currently we only handle this simple case because it's the only one
+                            // that arises: ZDef's are only 32-bit right now. So, when we hit these
+                            // assertions it means that we need to implement those other kinds of
+                            // zero fills.
+                            RELEASE_ASSERT(slot->byteSize() == 8);
+                            RELEASE_ASSERT(width == Arg::Width32);
+
+                            RELEASE_ASSERT(isValidForm(StoreZero32, Arg::Stack));
+                            insertionSet.insert(
+                                instIndex + 1, StoreZero32, inst.origin,
+                                stackAddr(arg.offset() + 4 + slot->offsetFromFP()));
+                        }
+                        arg = stackAddr(arg.offset() + slot->offsetFromFP());
+                        break;
+                    }
+                    case Arg::CallArg:
+                        arg = stackAddr(arg.offset() - code.frameSize());
+                        break;
+                    default:
+                        break;
+                    }
+                }
+            );
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/b3/air/AirAllocateStack.h b/b3/air/AirAllocateStack.h
new file mode 100644
index 0000000..31519d2
--- /dev/null
+++ b/b3/air/AirAllocateStack.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This allocates StackSlots to places on the stack. It first allocates the pinned ones in index
+// order and then it allocates the rest using first fit. Takes the opportunity to kill dead
+// assignments to stack slots, since it knows which ones are live. Also fixes ZDefs to anonymous
+// stack slots.
+
+void allocateStack(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
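
The first-fit strategy summarized in the header comment above is easy to miss inside the full phase, so here is a small self-contained sketch of just that piece (simplified assumptions: fixed 8-byte slots, no alignment rounding, std::vector instead of WTF::Vector, and no escaped-slot handling).

    #include <cstdint>
    #include <vector>

    struct Slot { intptr_t offset { 0 }; intptr_t size { 8 }; };

    // True if [offset, offset + size) overlaps any already-placed slot.
    static bool overlaps(intptr_t offset, intptr_t size, const std::vector<Slot*>& placed)
    {
        for (Slot* other : placed) {
            if (!other->offset)
                continue;
            if (offset < other->offset + other->size && other->offset < offset + size)
                return true;
        }
        return false;
    }

    // First fit: try the offset closest to FP, then just below each placed interfering slot.
    static void place(Slot* slot, const std::vector<Slot*>& interfering)
    {
        intptr_t candidate = -slot->size;
        if (!overlaps(candidate, slot->size, interfering)) {
            slot->offset = candidate;
            return;
        }
        for (Slot* other : interfering) {
            if (!other->offset)
                continue;
            candidate = other->offset - slot->size;
            if (!overlaps(candidate, slot->size, interfering)) {
                slot->offset = candidate;
                return;
            }
        }
    }
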
diff --git a/b3/air/AirArg.cpp b/b3/air/AirArg.cpp
new file mode 100644
index 0000000..c777928
--- /dev/null
+++ b/b3/air/AirArg.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirArg.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "B3Value.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool Arg::isStackMemory() const
+{
+    switch (kind()) {
+    case Addr:
+        return base() == Air::Tmp(GPRInfo::callFrameRegister)
+            || base() == Air::Tmp(MacroAssembler::stackPointerRegister);
+    case Stack:
+    case CallArg:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool Arg::isRepresentableAs(Width width, Signedness signedness) const
+{
+    return isRepresentableAs(width, signedness, value());
+}
+
+bool Arg::usesTmp(Air::Tmp tmp) const
+{
+    bool uses = false;
+    const_cast<Arg*>(this)->forEachTmpFast(
+        [&] (Air::Tmp otherTmp) {
+            if (otherTmp == tmp)
+                uses = true;
+        });
+    return uses;
+}
+
+bool Arg::canRepresent(Value* value) const
+{
+    return isType(typeForB3Type(value->type()));
+}
+
+bool Arg::isCompatibleType(const Arg& other) const
+{
+    if (hasType())
+        return other.isType(type());
+    if (other.hasType())
+        return isType(other.type());
+    return true;
+}
+
+unsigned Arg::jsHash() const
+{
+    unsigned result = static_cast<unsigned>(m_kind);
+    
+    switch (m_kind) {
+    case Invalid:
+    case Special:
+        break;
+    case Tmp:
+        result += m_base.internalValue();
+        break;
+    case Imm:
+    case BitImm:
+    case CallArg:
+    case RelCond:
+    case ResCond:
+    case DoubleCond:
+    case WidthArg:
+        result += static_cast<unsigned>(m_offset);
+        break;
+    case BigImm:
+    case BitImm64:
+        result += static_cast<unsigned>(m_offset);
+        result += static_cast<unsigned>(m_offset >> 32);
+        break;
+    case Addr:
+        result += m_offset;
+        result += m_base.internalValue();
+        break;
+    case Index:
+        result += static_cast<unsigned>(m_offset);
+        result += m_scale;
+        result += m_base.internalValue();
+        result += m_index.internalValue();
+        break;
+    case Stack:
+        result += static_cast<unsigned>(m_scale);
+        result += stackSlot()->index();
+        break;
+    }
+    
+    return result;
+}
+
+void Arg::dump(PrintStream& out) const
+{
+    switch (m_kind) {
+    case Invalid:
+        out.print("<invalid>");
+        return;
+    case Tmp:
+        out.print(tmp());
+        return;
+    case Imm:
+        out.print("$", m_offset);
+        return;
+    case BigImm:
+        out.printf("$0x%llx", static_cast<unsigned long long>(m_offset));
+        return;
+    case BitImm:
+        out.print("$", m_offset);
+        return;
+    case BitImm64:
+        out.printf("$0x%llx", static_cast<unsigned long long>(m_offset));
+        return;
+    case Addr:
+        if (offset())
+            out.print(offset());
+        out.print("(", base(), ")");
+        return;
+    case Index:
+        if (offset())
+            out.print(offset());
+        out.print("(", base(), ",", index());
+        if (scale() != 1)
+            out.print(",", scale());
+        out.print(")");
+        return;
+    case Stack:
+        if (offset())
+            out.print(offset());
+        out.print("(", pointerDump(stackSlot()), ")");
+        return;
+    case CallArg:
+        if (offset())
+            out.print(offset());
+        out.print("(callArg)");
+        return;
+    case RelCond:
+        out.print(asRelationalCondition());
+        return;
+    case ResCond:
+        out.print(asResultCondition());
+        return;
+    case DoubleCond:
+        out.print(asDoubleCondition());
+        return;
+    case Special:
+        out.print(pointerDump(special()));
+        return;
+    case WidthArg:
+        out.print(width());
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+using namespace JSC::B3::Air;
+
+void printInternal(PrintStream& out, Arg::Kind kind)
+{
+    switch (kind) {
+    case Arg::Invalid:
+        out.print("Invalid");
+        return;
+    case Arg::Tmp:
+        out.print("Tmp");
+        return;
+    case Arg::Imm:
+        out.print("Imm");
+        return;
+    case Arg::BigImm:
+        out.print("BigImm");
+        return;
+    case Arg::BitImm:
+        out.print("BitImm");
+        return;
+    case Arg::BitImm64:
+        out.print("BitImm64");
+        return;
+    case Arg::Addr:
+        out.print("Addr");
+        return;
+    case Arg::Stack:
+        out.print("Stack");
+        return;
+    case Arg::CallArg:
+        out.print("CallArg");
+        return;
+    case Arg::Index:
+        out.print("Index");
+        return;
+    case Arg::RelCond:
+        out.print("RelCond");
+        return;
+    case Arg::ResCond:
+        out.print("ResCond");
+        return;
+    case Arg::DoubleCond:
+        out.print("DoubleCond");
+        return;
+    case Arg::Special:
+        out.print("Special");
+        return;
+    case Arg::WidthArg:
+        out.print("WidthArg");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Role role)
+{
+    switch (role) {
+    case Arg::Use:
+        out.print("Use");
+        return;
+    case Arg::Def:
+        out.print("Def");
+        return;
+    case Arg::UseDef:
+        out.print("UseDef");
+        return;
+    case Arg::ZDef:
+        out.print("ZDef");
+        return;
+    case Arg::UseZDef:
+        out.print("UseZDef");
+        return;
+    case Arg::UseAddr:
+        out.print("UseAddr");
+        return;
+    case Arg::ColdUse:
+        out.print("ColdUse");
+        return;
+    case Arg::LateUse:
+        out.print("LateUse");
+        return;
+    case Arg::LateColdUse:
+        out.print("LateColdUse");
+        return;
+    case Arg::EarlyDef:
+        out.print("EarlyDef");
+        return;
+    case Arg::Scratch:
+        out.print("Scratch");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Type type)
+{
+    switch (type) {
+    case Arg::GP:
+        out.print("GP");
+        return;
+    case Arg::FP:
+        out.print("FP");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Width width)
+{
+    switch (width) {
+    case Arg::Width8:
+        out.print("8");
+        return;
+    case Arg::Width16:
+        out.print("16");
+        return;
+    case Arg::Width32:
+        out.print("32");
+        return;
+    case Arg::Width64:
+        out.print("64");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, Arg::Signedness signedness)
+{
+    switch (signedness) {
+    case Arg::Signed:
+        out.print("Signed");
+        return;
+    case Arg::Unsigned:
+        out.print("Unsigned");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
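
For readers new to Air, the Arg kinds printed above are the operand forms that appear in Inst argument lists. A hypothetical one-liner in the style of the class comment in AirArg.h below (assuming `Move`, `origin`, `base`, and `dest` from surrounding Air code):

    Inst load(Move, origin, Arg::addr(base, 8), Arg(dest));  // load from [base + 8] into the Tmp dest
    ASSERT(load.args[0].isAddr() && load.args[1].isTmp());
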
diff --git a/b3/air/AirArg.h b/b3/air/AirArg.h
new file mode 100644
index 0000000..13db1ce
--- /dev/null
+++ b/b3/air/AirArg.h
@@ -0,0 +1,1383 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirTmp.h"
+#include "B3Common.h"
+#include "B3Type.h"
+#include 
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+namespace Air {
+
+class Special;
+class StackSlot;
+
+// This class name is also intentionally terse because we will say it a lot. You'll see code like
+// Inst(..., Arg::imm(5), Arg::addr(thing, blah), ...)
+class Arg {
+public:
+    // These enum members are intentionally terse because we have to mention them a lot.
+    enum Kind : int8_t {
+        Invalid,
+
+        // This is either an unassigned temporary or a register. All unassigned temporaries
+        // eventually become registers.
+        Tmp,
+
+        // This is an immediate that the instruction will materialize. Imm is the immediate that can be
+        // inlined into most instructions, while BigImm indicates a constant materialization and is
+        // usually only usable with Move. Specials may also admit it, for example for stackmaps used for
+        // OSR exit and tail calls.
+        // BitImm is an immediate for bitwise operations (And, Xor, etc.).
+        Imm,
+        BigImm,
+        BitImm,
+        BitImm64,
+
+        // These are the addresses. Instructions may load from (Use), store to (Def), or evaluate
+        // (UseAddr) addresses.
+        Addr,
+        Stack,
+        CallArg,
+        Index,
+
+        // Immediate operands that customize the behavior of an operation. You can think of them as
+        // secondary opcodes. They are always "Use"'d.
+        RelCond,
+        ResCond,
+        DoubleCond,
+        Special,
+        WidthArg
+    };
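+
+    // To make the kinds above concrete, here is a rough, illustrative sketch of how they are
+    // typically constructed via the static helpers further down in this class (base and index
+    // stand for hypothetical GP Tmps):
+    //
+    //     Arg::imm(42)                          // Imm: small enough to inline into most instructions.
+    //     Arg::bigImm(0x123456789abc)           // BigImm: must be materialized, usually by a Move.
+    //     Arg::addr(base, 8)                    // Addr: the address base + 8.
+    //     Arg::index(base, index, 4)            // Index: the address base + index * 4.
+    //     Arg::relCond(MacroAssembler::Equal)   // RelCond: a "secondary opcode" for branches.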
+
+    enum Role : int8_t {
+        // Use means that the Inst will read from this value before doing anything else.
+        //
+        // For Tmp: The Inst will read this Tmp.
+        // For Arg::addr and friends: The Inst will load from this address.
+        // For Arg::imm and friends: The Inst will materialize and use this immediate.
+        // For RelCond/ResCond/Special: This is the only valid role for these kinds.
+        //
+        // Note that Use of an address does not mean escape. It only means that the instruction will
+        // load from the address before doing anything else. This is a bit tricky; for example
+        // Specials could theoretically squirrel away the address and effectively escape it. However,
+        // this is not legal. On the other hand, any address other than Stack is presumed to be
+        // always escaping, and Stack is presumed to be always escaping if it's Locked.
+        Use,
+
+        // Exactly like Use, except that it also implies that the use is cold: that is, replacing the
+        // use with something on the stack is free.
+        ColdUse,
+
+        // LateUse means that the Inst will read from this value after doing its Def's. Note that LateUse
+        // on an Addr or Index still means Use on the internal temporaries. Note that specifying the
+        // same Tmp once as Def and once as LateUse has undefined behavior: the use may happen before
+        // the def, or it may happen after it.
+        LateUse,
+
+        // Combination of LateUse and ColdUse.
+        LateColdUse,
+
+        // Def means that the Inst will write to this value after doing everything else.
+        //
+        // For Tmp: The Inst will write to this Tmp.
+        // For Arg::addr and friends: The Inst will store to this address.
+        // This isn't valid for any other kinds.
+        //
+        // Like Use of address, Def of address does not mean escape.
+        Def,
+
+        // This is a special variant of Def that implies that the upper bits of the target register are
+        // zero-filled. Specifically, if the Width of a ZDef is less than the largest possible width of
+        // the argument (for example, we're on a 64-bit machine and we have a Width32 ZDef of a GPR) then
+        // this has different implications for the upper bits (i.e. the top 32 bits in our example)
+        // depending on the kind of the argument:
+        //
+        // For register: the upper bits are zero-filled.
+        // For anonymous stack slot: the upper bits are zero-filled.
+        // For address: the upper bits are not touched (i.e. we do a 32-bit store in our example).
+        // For tmp: either the upper bits are not touched or they are zero-filled, and we won't know
+        // which until we lower the tmp to either a StackSlot or a Reg.
+        //
+        // The behavior of ZDef is consistent with what happens when you perform 32-bit operations on a
+        // 64-bit GPR. It's not consistent with what happens with 8-bit or 16-bit Defs on x86 GPRs, or
+        // what happens with float Defs in ARM NEON or X86 SSE. Hence why we have both Def and ZDef.
+        ZDef,
+
+        // This is a combined Use and Def. It means that both things happen.
+        UseDef,
+
+        // This is a combined Use and ZDef. It means that both things happen.
+        UseZDef,
+
+        // This is like Def, but implies that the assignment occurs before the start of the Inst's
+        // execution rather than after. Note that specifying the same Tmp once as EarlyDef and once
+        // as Use has undefined behavior: the use may happen before the def, or it may happen after
+        // it.
+        EarlyDef,
+
+        // Some instructions need a scratch register. We model this by saying that the temporary is
+        // defined early and used late. This role implies that.
+        Scratch,
+
+        // This is a special kind of use that is only valid for addresses. It means that the
+        // instruction will evaluate the address expression and consume the effective address, but it
+        // will neither load nor store. This is an escaping use, because now the address may be
+        // passed along to who-knows-where. Note that this isn't really a Use of the Arg, but it does
+        // imply that we're Use'ing any registers that the Arg contains.
+        UseAddr
+    };
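+
+    // A worked example (a sketch, not a normative description of any particular opcode): a
+    // two-operand instruction like "Add32 %a, %b" would describe %a as Use and %b as UseZDef,
+    // since it reads both operands and then overwrites %b, zero-filling the high bits as
+    // described for ZDef above. A temporary that only serves as a scratch register would be
+    // described as Scratch (EarlyDef plus LateUse), so the register allocator can never assign
+    // it the same register as any of the instruction's other operands.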
+
+    enum Type : int8_t {
+        GP,
+        FP
+    };
+
+    static const unsigned numTypes = 2;
+
+    template<typename Functor>
+    static void forEachType(const Functor& functor)
+    {
+        functor(GP);
+        functor(FP);
+    }
+
+    enum Width : int8_t {
+        Width8,
+        Width16,
+        Width32,
+        Width64
+    };
+
+    static Width pointerWidth()
+    {
+        if (sizeof(void*) == 8)
+            return Width64;
+        return Width32;
+    }
+
+    enum Signedness : int8_t {
+        Signed,
+        Unsigned
+    };
+
+    // Returns true if the Role implies that the Inst will Use the Arg. It's deliberately false for
+    // UseAddr, since isAnyUse() for an Arg::addr means that we are loading from the address.
+    static bool isAnyUse(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseDef:
+        case UseZDef:
+        case LateUse:
+        case LateColdUse:
+        case Scratch:
+            return true;
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static bool isColdUse(Role role)
+    {
+        switch (role) {
+        case ColdUse:
+        case LateColdUse:
+            return true;
+        case Use:
+        case UseDef:
+        case UseZDef:
+        case LateUse:
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case Scratch:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static bool isWarmUse(Role role)
+    {
+        return isAnyUse(role) && !isColdUse(role);
+    }
+
+    static Role cooled(Role role)
+    {
+        switch (role) {
+        case ColdUse:
+        case LateColdUse:
+        case UseDef:
+        case UseZDef:
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case Scratch:
+        case EarlyDef:
+            return role;
+        case Use:
+            return ColdUse;
+        case LateUse:
+            return LateColdUse;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Use the Arg before doing anything else.
+    static bool isEarlyUse(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseDef:
+        case UseZDef:
+            return true;
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case LateUse:
+        case LateColdUse:
+        case Scratch:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Use the Arg after doing everything else.
+    static bool isLateUse(Role role)
+    {
+        switch (role) {
+        case LateUse:
+        case LateColdUse:
+        case Scratch:
+            return true;
+        case ColdUse:
+        case Use:
+        case UseDef:
+        case UseZDef:
+        case Def:
+        case ZDef:
+        case UseAddr:
+        case EarlyDef:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Def the Arg.
+    static bool isAnyDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case LateColdUse:
+            return false;
+        case Def:
+        case UseDef:
+        case ZDef:
+        case UseZDef:
+        case EarlyDef:
+        case Scratch:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Def the Arg before start of execution.
+    static bool isEarlyDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case Def:
+        case UseDef:
+        case ZDef:
+        case UseZDef:
+        case LateColdUse:
+            return false;
+        case EarlyDef:
+        case Scratch:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will Def the Arg after the end of execution.
+    static bool isLateDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case EarlyDef:
+        case Scratch:
+        case LateColdUse:
+            return false;
+        case Def:
+        case UseDef:
+        case ZDef:
+        case UseZDef:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Returns true if the Role implies that the Inst will ZDef the Arg.
+    static bool isZDef(Role role)
+    {
+        switch (role) {
+        case Use:
+        case ColdUse:
+        case UseAddr:
+        case LateUse:
+        case Def:
+        case UseDef:
+        case EarlyDef:
+        case Scratch:
+        case LateColdUse:
+            return false;
+        case ZDef:
+        case UseZDef:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static Type typeForB3Type(B3::Type type)
+    {
+        switch (type) {
+        case Void:
+            ASSERT_NOT_REACHED();
+            return GP;
+        case Int32:
+        case Int64:
+            return GP;
+        case Float:
+        case Double:
+            return FP;
+        }
+        ASSERT_NOT_REACHED();
+        return GP;
+    }
+
+    static Width widthForB3Type(B3::Type type)
+    {
+        switch (type) {
+        case Void:
+            ASSERT_NOT_REACHED();
+            return Width8;
+        case Int32:
+        case Float:
+            return Width32;
+        case Int64:
+        case Double:
+            return Width64;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    static Width conservativeWidth(Type type)
+    {
+        return type == GP ? pointerWidth() : Width64;
+    }
+
+    static Width minimumWidth(Type type)
+    {
+        return type == GP ? Width8 : Width32;
+    }
+
+    static unsigned bytes(Width width)
+    {
+        return 1 << width;
+    }
+
+    static Width widthForBytes(unsigned bytes)
+    {
+        switch (bytes) {
+        case 0:
+        case 1:
+            return Width8;
+        case 2:
+            return Width16;
+        case 3:
+        case 4:
+            return Width32;
+        default:
+            return Width64;
+        }
+    }
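+
+    // Since the Width enumerators are the log2 of the byte count, bytes() and widthForBytes()
+    // roughly invert each other: bytes(Width8) == 1, bytes(Width64) == 8, widthForBytes(4) ==
+    // Width32, and widthForBytes(5) conservatively rounds up to Width64.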
+
+    Arg()
+        : m_kind(Invalid)
+    {
+    }
+
+    Arg(Air::Tmp tmp)
+        : m_kind(Tmp)
+        , m_base(tmp)
+    {
+    }
+
+    Arg(Reg reg)
+        : Arg(Air::Tmp(reg))
+    {
+    }
+
+    static Arg imm(int64_t value)
+    {
+        Arg result;
+        result.m_kind = Imm;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg bigImm(int64_t value)
+    {
+        Arg result;
+        result.m_kind = BigImm;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg bitImm(int64_t value)
+    {
+        Arg result;
+        result.m_kind = BitImm;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg bitImm64(int64_t value)
+    {
+        Arg result;
+        result.m_kind = BitImm64;
+        result.m_offset = value;
+        return result;
+    }
+
+    static Arg immPtr(const void* address)
+    {
+        return bigImm(bitwise_cast<intptr_t>(address));
+    }
+
+    static Arg addr(Air::Tmp base, int32_t offset = 0)
+    {
+        ASSERT(base.isGP());
+        Arg result;
+        result.m_kind = Addr;
+        result.m_base = base;
+        result.m_offset = offset;
+        return result;
+    }
+
+    static Arg stack(StackSlot* value, int32_t offset = 0)
+    {
+        Arg result;
+        result.m_kind = Stack;
+        result.m_offset = bitwise_cast<intptr_t>(value);
+        result.m_scale = offset; // I know, yuck.
+        return result;
+    }
+
+    static Arg callArg(int32_t offset)
+    {
+        Arg result;
+        result.m_kind = CallArg;
+        result.m_offset = offset;
+        return result;
+    }
+
+    static Arg stackAddr(int32_t offsetFromFP, unsigned frameSize, Width width)
+    {
+        Arg result = Arg::addr(Air::Tmp(GPRInfo::callFrameRegister), offsetFromFP);
+        if (!result.isValidForm(width)) {
+            result = Arg::addr(
+                Air::Tmp(MacroAssembler::stackPointerRegister),
+                offsetFromFP + frameSize);
+        }
+        return result;
+    }
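+
+    // An illustrative example: on ARM64 with Width64, stackAddr(-264, 1024, Width64) first tries
+    // Addr(fp, -264); that offset fits neither the signed 9-bit nor the scaled unsigned 12-bit
+    // encoding, so it falls back to Addr(sp, -264 + 1024) = Addr(sp, 760), which does. On x86 the
+    // frame-pointer form is always valid, so the fallback never triggers.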
+
+    // If you don't pass a Width, this optimistically assumes that you're using the right width.
+    static bool isValidScale(unsigned scale, std::optional<Width> width = std::nullopt)
+    {
+        switch (scale) {
+        case 1:
+            if (isX86() || isARM64())
+                return true;
+            return false;
+        case 2:
+        case 4:
+        case 8:
+            if (isX86())
+                return true;
+            if (isARM64()) {
+                if (!width)
+                    return true;
+                return scale == 1 || scale == bytes(*width);
+            }
+            return false;
+        default:
+            return false;
+        }
+    }
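+
+    // For example, isValidScale(4) is true on both x86 and ARM64 when no width is given, but
+    // isValidScale(4, Width8) is false on ARM64 (and still true on x86), because ARM64 scaled
+    // indexing must multiply by either 1 or the access size in bytes.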
+
+    static unsigned logScale(unsigned scale)
+    {
+        switch (scale) {
+        case 1:
+            return 0;
+        case 2:
+            return 1;
+        case 4:
+            return 2;
+        case 8:
+            return 3;
+        default:
+            ASSERT_NOT_REACHED();
+            return 0;
+        }
+    }
+
+    static Arg index(Air::Tmp base, Air::Tmp index, unsigned scale = 1, int32_t offset = 0)
+    {
+        ASSERT(base.isGP());
+        ASSERT(index.isGP());
+        ASSERT(isValidScale(scale));
+        Arg result;
+        result.m_kind = Index;
+        result.m_base = base;
+        result.m_index = index;
+        result.m_scale = static_cast<int32_t>(scale);
+        result.m_offset = offset;
+        return result;
+    }
+
+    static Arg relCond(MacroAssembler::RelationalCondition condition)
+    {
+        Arg result;
+        result.m_kind = RelCond;
+        result.m_offset = condition;
+        return result;
+    }
+
+    static Arg resCond(MacroAssembler::ResultCondition condition)
+    {
+        Arg result;
+        result.m_kind = ResCond;
+        result.m_offset = condition;
+        return result;
+    }
+
+    static Arg doubleCond(MacroAssembler::DoubleCondition condition)
+    {
+        Arg result;
+        result.m_kind = DoubleCond;
+        result.m_offset = condition;
+        return result;
+    }
+
+    static Arg special(Air::Special* special)
+    {
+        Arg result;
+        result.m_kind = Special;
+        result.m_offset = bitwise_cast<intptr_t>(special);
+        return result;
+    }
+
+    static Arg widthArg(Width width)
+    {
+        Arg result;
+        result.m_kind = WidthArg;
+        result.m_offset = width;
+        return result;
+    }
+
+    bool operator==(const Arg& other) const
+    {
+        return m_offset == other.m_offset
+            && m_kind == other.m_kind
+            && m_base == other.m_base
+            && m_index == other.m_index
+            && m_scale == other.m_scale;
+    }
+
+    bool operator!=(const Arg& other) const
+    {
+        return !(*this == other);
+    }
+
+    explicit operator bool() const { return *this != Arg(); }
+
+    Kind kind() const
+    {
+        return m_kind;
+    }
+
+    bool isTmp() const
+    {
+        return kind() == Tmp;
+    }
+
+    bool isImm() const
+    {
+        return kind() == Imm;
+    }
+
+    bool isBigImm() const
+    {
+        return kind() == BigImm;
+    }
+
+    bool isBitImm() const
+    {
+        return kind() == BitImm;
+    }
+
+    bool isBitImm64() const
+    {
+        return kind() == BitImm64;
+    }
+
+    bool isSomeImm() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BigImm:
+        case BitImm:
+        case BitImm64:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isAddr() const
+    {
+        return kind() == Addr;
+    }
+
+    bool isStack() const
+    {
+        return kind() == Stack;
+    }
+
+    bool isCallArg() const
+    {
+        return kind() == CallArg;
+    }
+
+    bool isIndex() const
+    {
+        return kind() == Index;
+    }
+
+    bool isMemory() const
+    {
+        switch (kind()) {
+        case Addr:
+        case Stack:
+        case CallArg:
+        case Index:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isStackMemory() const;
+
+    bool isRelCond() const
+    {
+        return kind() == RelCond;
+    }
+
+    bool isResCond() const
+    {
+        return kind() == ResCond;
+    }
+
+    bool isDoubleCond() const
+    {
+        return kind() == DoubleCond;
+    }
+
+    bool isCondition() const
+    {
+        switch (kind()) {
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isSpecial() const
+    {
+        return kind() == Special;
+    }
+
+    bool isWidthArg() const
+    {
+        return kind() == WidthArg;
+    }
+
+    bool isAlive() const
+    {
+        return isTmp() || isStack();
+    }
+
+    Air::Tmp tmp() const
+    {
+        ASSERT(kind() == Tmp);
+        return m_base;
+    }
+
+    int64_t value() const
+    {
+        ASSERT(isSomeImm());
+        return m_offset;
+    }
+
+    template<typename T>
+    bool isRepresentableAs() const
+    {
+        return B3::isRepresentableAs<T>(value());
+    }
+    
+    static bool isRepresentableAs(Width width, Signedness signedness, int64_t value)
+    {
+        switch (signedness) {
+        case Signed:
+            switch (width) {
+            case Width8:
+                return B3::isRepresentableAs<int8_t>(value);
+            case Width16:
+                return B3::isRepresentableAs<int16_t>(value);
+            case Width32:
+                return B3::isRepresentableAs<int32_t>(value);
+            case Width64:
+                return B3::isRepresentableAs<int64_t>(value);
+            }
+        case Unsigned:
+            switch (width) {
+            case Width8:
+                return B3::isRepresentableAs<uint8_t>(value);
+            case Width16:
+                return B3::isRepresentableAs<uint16_t>(value);
+            case Width32:
+                return B3::isRepresentableAs<uint32_t>(value);
+            case Width64:
+                return B3::isRepresentableAs<uint64_t>(value);
+            }
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    bool isRepresentableAs(Width, Signedness) const;
+    
+    static int64_t castToType(Width width, Signedness signedness, int64_t value)
+    {
+        switch (signedness) {
+        case Signed:
+            switch (width) {
+            case Width8:
+                return static_cast<int8_t>(value);
+            case Width16:
+                return static_cast<int16_t>(value);
+            case Width32:
+                return static_cast<int32_t>(value);
+            case Width64:
+                return static_cast<int64_t>(value);
+            }
+        case Unsigned:
+            switch (width) {
+            case Width8:
+                return static_cast<uint8_t>(value);
+            case Width16:
+                return static_cast<uint16_t>(value);
+            case Width32:
+                return static_cast<uint32_t>(value);
+            case Width64:
+                return static_cast<uint64_t>(value);
+            }
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    template<typename T>
+    T asNumber() const
+    {
+        return static_cast<T>(value());
+    }
+
+    void* pointerValue() const
+    {
+        ASSERT(kind() == BigImm);
+        return bitwise_cast<void*>(static_cast<intptr_t>(m_offset));
+    }
+
+    Air::Tmp base() const
+    {
+        ASSERT(kind() == Addr || kind() == Index);
+        return m_base;
+    }
+
+    bool hasOffset() const { return isMemory(); }
+    
+    int32_t offset() const
+    {
+        if (kind() == Stack)
+            return static_cast<int32_t>(m_scale);
+        ASSERT(kind() == Addr || kind() == CallArg || kind() == Index);
+        return static_cast<int32_t>(m_offset);
+    }
+
+    StackSlot* stackSlot() const
+    {
+        ASSERT(kind() == Stack);
+        return bitwise_cast<StackSlot*>(m_offset);
+    }
+
+    Air::Tmp index() const
+    {
+        ASSERT(kind() == Index);
+        return m_index;
+    }
+
+    unsigned scale() const
+    {
+        ASSERT(kind() == Index);
+        return m_scale;
+    }
+
+    unsigned logScale() const
+    {
+        return logScale(scale());
+    }
+
+    Air::Special* special() const
+    {
+        ASSERT(kind() == Special);
+        return bitwise_cast<Air::Special*>(m_offset);
+    }
+
+    Width width() const
+    {
+        ASSERT(kind() == WidthArg);
+        return static_cast<Width>(m_offset);
+    }
+
+    bool isGPTmp() const
+    {
+        return isTmp() && tmp().isGP();
+    }
+
+    bool isFPTmp() const
+    {
+        return isTmp() && tmp().isFP();
+    }
+    
+    // Tells us if this Arg can be used in a position that requires a GP value.
+    bool isGP() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BigImm:
+        case BitImm:
+        case BitImm64:
+        case Addr:
+        case Index:
+        case Stack:
+        case CallArg:
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+        case Special:
+        case WidthArg:
+            return true;
+        case Tmp:
+            return isGPTmp();
+        case Invalid:
+            return false;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    // Tells us if this Arg can be used in a position that requires a FP value.
+    bool isFP() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BitImm:
+        case BitImm64:
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+        case Special:
+        case WidthArg:
+        case Invalid:
+            return false;
+        case Addr:
+        case Index:
+        case Stack:
+        case CallArg:
+        case BigImm: // Yes, we allow BigImm as a double immediate. We use this for implementing stackmaps.
+            return true;
+        case Tmp:
+            return isFPTmp();
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    bool hasType() const
+    {
+        switch (kind()) {
+        case Imm:
+        case BitImm:
+        case BitImm64:
+        case Special:
+        case Tmp:
+            return true;
+        default:
+            return false;
+        }
+    }
+    
+    // The type is ambiguous for some arg kinds. Call with care.
+    Type type() const
+    {
+        return isGP() ? GP : FP;
+    }
+
+    bool isType(Type type) const
+    {
+        switch (type) {
+        case GP:
+            return isGP();
+        case FP:
+            return isFP();
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    bool canRepresent(Value* value) const;
+
+    bool isCompatibleType(const Arg& other) const;
+
+    bool isGPR() const
+    {
+        return isTmp() && tmp().isGPR();
+    }
+
+    GPRReg gpr() const
+    {
+        return tmp().gpr();
+    }
+
+    bool isFPR() const
+    {
+        return isTmp() && tmp().isFPR();
+    }
+
+    FPRReg fpr() const
+    {
+        return tmp().fpr();
+    }
+    
+    bool isReg() const
+    {
+        return isTmp() && tmp().isReg();
+    }
+
+    Reg reg() const
+    {
+        return tmp().reg();
+    }
+
+    unsigned gpTmpIndex() const
+    {
+        return tmp().gpTmpIndex();
+    }
+
+    unsigned fpTmpIndex() const
+    {
+        return tmp().fpTmpIndex();
+    }
+
+    unsigned tmpIndex() const
+    {
+        return tmp().tmpIndex();
+    }
+
+    static bool isValidImmForm(int64_t value)
+    {
+        if (isX86())
+            return B3::isRepresentableAs<int32_t>(value);
+        if (isARM64())
+            return isUInt12(value);
+        return false;
+    }
+
+    static bool isValidBitImmForm(int64_t value)
+    {
+        if (isX86())
+            return B3::isRepresentableAs<uint32_t>(value);
+        if (isARM64())
+            return ARM64LogicalImmediate::create32(value).isValid();
+        return false;
+    }
+
+    static bool isValidBitImm64Form(int64_t value)
+    {
+        if (isX86())
+            return B3::isRepresentableAs<uint32_t>(value);
+        if (isARM64())
+            return ARM64LogicalImmediate::create64(value).isValid();
+        return false;
+    }
+
+    static bool isValidAddrForm(int32_t offset, std::optional<Width> width = std::nullopt)
+    {
+        if (isX86())
+            return true;
+        if (isARM64()) {
+            if (!width)
+                return true;
+
+            if (isValidSignedImm9(offset))
+                return true;
+
+            switch (*width) {
+            case Width8:
+                return isValidScaledUImm12<8>(offset);
+            case Width16:
+                return isValidScaledUImm12<16>(offset);
+            case Width32:
+                return isValidScaledUImm12<32>(offset);
+            case Width64:
+                return isValidScaledUImm12<64>(offset);
+            }
+        }
+        return false;
+    }
+
+    static bool isValidIndexForm(unsigned scale, int32_t offset, std::optional<Width> width = std::nullopt)
+    {
+        if (!isValidScale(scale, width))
+            return false;
+        if (isX86())
+            return true;
+        if (isARM64())
+            return !offset;
+        return false;
+    }
+
+    // If you don't pass a width then this optimistically assumes that you're using the right width. But
+    // the width is relevant to validity, so passing a null width is only useful for assertions. Don't
+    // pass null widths when cascading through Args in the instruction selector!
+    bool isValidForm(std::optional<Width> width = std::nullopt) const
+    {
+        switch (kind()) {
+        case Invalid:
+            return false;
+        case Tmp:
+            return true;
+        case Imm:
+            return isValidImmForm(value());
+        case BigImm:
+            return true;
+        case BitImm:
+            return isValidBitImmForm(value());
+        case BitImm64:
+            return isValidBitImm64Form(value());
+        case Addr:
+        case Stack:
+        case CallArg:
+            return isValidAddrForm(offset(), width);
+        case Index:
+            return isValidIndexForm(scale(), offset(), width);
+        case RelCond:
+        case ResCond:
+        case DoubleCond:
+        case Special:
+        case WidthArg:
+            return true;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    template<typename Functor>
+    void forEachTmpFast(const Functor& functor)
+    {
+        switch (m_kind) {
+        case Tmp:
+        case Addr:
+            functor(m_base);
+            break;
+        case Index:
+            functor(m_base);
+            functor(m_index);
+            break;
+        default:
+            break;
+        }
+    }
+
+    bool usesTmp(Air::Tmp tmp) const;
+
+    template<typename Thing>
+    bool is() const;
+
+    template<typename Thing>
+    Thing as() const;
+
+    template<typename Thing, typename Functor>
+    void forEachFast(const Functor&);
+
+    template<typename Thing, typename Functor>
+    void forEach(Role, Type, Width, const Functor&);
+
+    // This is smart enough to know that an address arg in a Def or UseDef rule will use its
+    // tmps and never def them. For example, this:
+    //
+    // mov %rax, (%rcx)
+    //
+    // This defs (%rcx) but uses %rcx.
+    template<typename Functor>
+    void forEachTmp(Role argRole, Type argType, Width argWidth, const Functor& functor)
+    {
+        switch (m_kind) {
+        case Tmp:
+            ASSERT(isAnyUse(argRole) || isAnyDef(argRole));
+            functor(m_base, argRole, argType, argWidth);
+            break;
+        case Addr:
+            functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+            break;
+        case Index:
+            functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+            functor(m_index, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
+            break;
+        default:
+            break;
+        }
+    }
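+
+    // Continuing the example above (a sketch): for "mov %rax, (%rcx)" the destination is an Addr
+    // in a Def role, so forEachTmp() reports %rcx with role Use, type GP, and pointer width,
+    // because storing through an address reads the base register rather than writing it.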
+
+    MacroAssembler::TrustedImm32 asTrustedImm32() const
+    {
+        ASSERT(isImm() || isBitImm());
+        return MacroAssembler::TrustedImm32(static_cast<int32_t>(m_offset));
+    }
+
+#if USE(JSVALUE64)
+    MacroAssembler::TrustedImm64 asTrustedImm64() const
+    {
+        ASSERT(isBigImm() || isBitImm64());
+        return MacroAssembler::TrustedImm64(value());
+    }
+#endif
+
+    MacroAssembler::TrustedImmPtr asTrustedImmPtr() const
+    {
+        if (is64Bit())
+            ASSERT(isBigImm());
+        else
+            ASSERT(isImm());
+        return MacroAssembler::TrustedImmPtr(pointerValue());
+    }
+
+    MacroAssembler::Address asAddress() const
+    {
+        ASSERT(isAddr());
+        return MacroAssembler::Address(m_base.gpr(), static_cast<int32_t>(m_offset));
+    }
+
+    MacroAssembler::BaseIndex asBaseIndex() const
+    {
+        ASSERT(isIndex());
+        return MacroAssembler::BaseIndex(
+            m_base.gpr(), m_index.gpr(), static_cast<MacroAssembler::Scale>(logScale()),
+            static_cast<int32_t>(m_offset));
+    }
+
+    MacroAssembler::RelationalCondition asRelationalCondition() const
+    {
+        ASSERT(isRelCond());
+        return static_cast<MacroAssembler::RelationalCondition>(m_offset);
+    }
+
+    MacroAssembler::ResultCondition asResultCondition() const
+    {
+        ASSERT(isResCond());
+        return static_cast<MacroAssembler::ResultCondition>(m_offset);
+    }
+
+    MacroAssembler::DoubleCondition asDoubleCondition() const
+    {
+        ASSERT(isDoubleCond());
+        return static_cast<MacroAssembler::DoubleCondition>(m_offset);
+    }
+    
+    // Tells you if the Arg is invertible. Only condition arguments are invertible, and even for those, there
+    // are a few exceptions - notably Overflow and Signed.
+    bool isInvertible() const
+    {
+        switch (kind()) {
+        case RelCond:
+        case DoubleCond:
+            return true;
+        case ResCond:
+            return MacroAssembler::isInvertible(asResultCondition());
+        default:
+            return false;
+        }
+    }
+
+    // This is valid for condition arguments. It will invert them.
+    Arg inverted(bool inverted = true) const
+    {
+        if (!inverted)
+            return *this;
+        switch (kind()) {
+        case RelCond:
+            return relCond(MacroAssembler::invert(asRelationalCondition()));
+        case ResCond:
+            return resCond(MacroAssembler::invert(asResultCondition()));
+        case DoubleCond:
+            return doubleCond(MacroAssembler::invert(asDoubleCondition()));
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return Arg();
+        }
+    }
+
+    Arg flipped(bool flipped = true) const
+    {
+        if (!flipped)
+            return Arg();
+        return relCond(MacroAssembler::flip(asRelationalCondition()));
+    }
+
+    bool isSignedCond() const
+    {
+        return isRelCond() && MacroAssembler::isSigned(asRelationalCondition());
+    }
+
+    bool isUnsignedCond() const
+    {
+        return isRelCond() && MacroAssembler::isUnsigned(asRelationalCondition());
+    }
+
+    // This computes a hash for comparing this to JSAir's Arg.
+    unsigned jsHash() const;
+    
+    void dump(PrintStream&) const;
+
+    Arg(WTF::HashTableDeletedValueType)
+        : m_base(WTF::HashTableDeletedValue)
+    {
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return *this == Arg(WTF::HashTableDeletedValue);
+    }
+
+    unsigned hash() const
+    {
+        // This really doesn't have to be that great.
+        return WTF::IntHash<int64_t>::hash(m_offset) + m_kind + m_scale + m_base.hash() +
+            m_index.hash();
+    }
+
+private:
+    int64_t m_offset { 0 };
+    Kind m_kind { Invalid };
+    int32_t m_scale { 1 };
+    Air::Tmp m_base;
+    Air::Tmp m_index;
+};
+
+struct ArgHash {
+    static unsigned hash(const Arg& key) { return key.hash(); }
+    static bool equal(const Arg& a, const Arg& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Kind);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Role);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Type);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Width);
+JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Arg::Signedness);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Air::Arg> {
+    typedef JSC::B3::Air::ArgHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Air::Arg> : SimpleClassHashTraits<JSC::B3::Air::Arg> {
+    // Because m_scale is 1 in the empty value.
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirArgInlines.h b/b3/air/AirArgInlines.h
new file mode 100644
index 0000000..73f7d5b
--- /dev/null
+++ b/b3/air/AirArgInlines.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<typename Thing> struct ArgThingHelper;
+
+template<> struct ArgThingHelper<Tmp> {
+    static bool is(const Arg& arg)
+    {
+        return arg.isTmp();
+    }
+
+    static Tmp as(const Arg& arg)
+    {
+        if (is(arg))
+            return arg.tmp();
+        return Tmp();
+    }
+
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        arg.forEachTmpFast(functor);
+    }
+
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+    {
+        arg.forEachTmp(role, type, width, functor);
+    }
+};
+
+template<> struct ArgThingHelper<Arg> {
+    static bool is(const Arg&)
+    {
+        return true;
+    }
+
+    static Arg as(const Arg& arg)
+    {
+        return arg;
+    }
+
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        functor(arg);
+    }
+
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+    {
+        functor(arg, role, type, width);
+    }
+};
+
+template<> struct ArgThingHelper<StackSlot*> {
+    static bool is(const Arg& arg)
+    {
+        return arg.isStack();
+    }
+    
+    static StackSlot* as(const Arg& arg)
+    {
+        return arg.stackSlot();
+    }
+    
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        if (!arg.isStack())
+            return;
+        
+        StackSlot* stackSlot = arg.stackSlot();
+        functor(stackSlot);
+        arg = Arg::stack(stackSlot, arg.offset());
+    }
+    
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width, const Functor& functor)
+    {
+        if (!arg.isStack())
+            return;
+        
+        StackSlot* stackSlot = arg.stackSlot();
+        
+        // FIXME: This is way too optimistic about the meaning of "Def". It gets lucky for
+        // now because our only use of "Anonymous" stack slots happens to want the optimistic
+        // semantics. We could fix this by just changing the comments that describe the
+        // semantics of "Anonymous".
+        // https://bugs.webkit.org/show_bug.cgi?id=151128
+        
+        functor(stackSlot, role, type, width);
+        arg = Arg::stack(stackSlot, arg.offset());
+    }
+};
+
+template<> struct ArgThingHelper<Reg> {
+    static bool is(const Arg& arg)
+    {
+        return arg.isReg();
+    }
+    
+    static Reg as(const Arg& arg)
+    {
+        return arg.reg();
+    }
+    
+    template<typename Functor>
+    static void forEachFast(Arg& arg, const Functor& functor)
+    {
+        arg.forEachTmpFast(
+            [&] (Tmp& tmp) {
+                if (!tmp.isReg())
+                    return;
+                
+                Reg reg = tmp.reg();
+                functor(reg);
+                tmp = Tmp(reg);
+            });
+    }
+    
+    template<typename Functor>
+    static void forEach(Arg& arg, Arg::Role argRole, Arg::Type argType, Arg::Width argWidth, const Functor& functor)
+    {
+        arg.forEachTmp(
+            argRole, argType, argWidth,
+            [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
+                if (!tmp.isReg())
+                    return;
+                
+                Reg reg = tmp.reg();
+                functor(reg, role, type, width);
+                tmp = Tmp(reg);
+            });
+    }
+};
+
+template<typename Thing>
+bool Arg::is() const
+{
+    return ArgThingHelper<Thing>::is(*this);
+}
+
+template<typename Thing>
+Thing Arg::as() const
+{
+    return ArgThingHelper<Thing>::as(*this);
+}
+
+template<typename Thing, typename Functor>
+void Arg::forEachFast(const Functor& functor)
+{
+    ArgThingHelper<Thing>::forEachFast(*this, functor);
+}
+
+template<typename Thing, typename Functor>
+void Arg::forEach(Role role, Type type, Width width, const Functor& functor)
+{
+    ArgThingHelper<Thing>::forEach(*this, role, type, width, functor);
+}
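+
+// Usage sketch (illustrative; "arg" stands for some Arg): arg.is<Tmp>() asks whether the arg is a
+// temporary, arg.as<Tmp>() returns that temporary (or a null Tmp otherwise), and
+// arg.forEachFast<Reg>([&] (Reg& reg) { ... }) visits only the registers mentioned by the arg.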
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirBasicBlock.cpp b/b3/air/AirBasicBlock.cpp
new file mode 100644
index 0000000..fa3ad8e
--- /dev/null
+++ b/b3/air/AirBasicBlock.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirBasicBlock.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3BasicBlockUtils.h"
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+const char* const BasicBlock::dumpPrefix = "#";
+
+bool BasicBlock::addPredecessor(BasicBlock* block)
+{
+    return B3::addPredecessor(this, block);
+}
+
+bool BasicBlock::removePredecessor(BasicBlock* block)
+{
+    return B3::removePredecessor(this, block);
+}
+
+bool BasicBlock::replacePredecessor(BasicBlock* from, BasicBlock* to)
+{
+    return B3::replacePredecessor(this, from, to);
+}
+
+void BasicBlock::dump(PrintStream& out) const
+{
+    out.print(dumpPrefix, m_index);
+}
+
+void BasicBlock::deepDump(PrintStream& out) const
+{
+    dumpHeader(out);
+    for (const Inst& inst : *this)
+        out.print("    ", inst, "\n");
+    dumpFooter(out);
+}
+
+void BasicBlock::dumpHeader(PrintStream& out) const
+{
+    out.print("BB", *this, ": ; frequency = ", m_frequency, "\n");
+    if (predecessors().size())
+        out.print("  Predecessors: ", pointerListDump(predecessors()), "\n");
+}
+
+void BasicBlock::dumpFooter(PrintStream& out) const
+{
+    if (successors().size())
+        out.print("  Successors: ", listDump(successors()), "\n");
+}
+
+BasicBlock::BasicBlock(unsigned index, double frequency)
+    : m_index(index)
+    , m_frequency(frequency)
+{
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirBasicBlock.h b/b3/air/AirBasicBlock.h
new file mode 100644
index 0000000..431bd71
--- /dev/null
+++ b/b3/air/AirBasicBlock.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirFrequentedBlock.h"
+#include "AirInst.h"
+#include "B3SuccessorCollection.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BlockInsertionSet;
+class Code;
+class InsertionSet;
+
+class BasicBlock {
+    WTF_MAKE_NONCOPYABLE(BasicBlock);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    static const char* const dumpPrefix;
+
+    typedef Vector<Inst> InstList;
+    typedef Vector<BasicBlock*> PredecessorList;
+    typedef Vector<FrequentedBlock> SuccessorList;
+
+    unsigned index() const { return m_index; }
+
+    // This method is exposed for phases that mess with the layout of basic blocks. Currently that means just
+    // optimizeBlockOrder().
+    void setIndex(unsigned index) { m_index = index; }
+    
+    unsigned size() const { return m_insts.size(); }
+    InstList::iterator begin() { return m_insts.begin(); }
+    InstList::iterator end() { return m_insts.end(); }
+    InstList::const_iterator begin() const { return m_insts.begin(); }
+    InstList::const_iterator end() const { return m_insts.end(); }
+
+    const Inst& at(unsigned index) const { return m_insts[index]; }
+    Inst& at(unsigned index) { return m_insts[index]; }
+
+    Inst* get(unsigned index)
+    {
+        return index < size() ? &at(index) : nullptr;
+    }
+
+    const Inst& last() const { return m_insts.last(); }
+    Inst& last() { return m_insts.last(); }
+
+    void resize(unsigned size) { m_insts.resize(size); }
+
+    const InstList& insts() const { return m_insts; }
+    InstList& insts() { return m_insts; }
+
+    template<typename Inst>
+    Inst& appendInst(Inst&& inst)
+    {
+        m_insts.append(std::forward<Inst>(inst));
+        return m_insts.last();
+    }
+
+    template<typename... Arguments>
+    Inst& append(Arguments&&... arguments)
+    {
+        m_insts.append(Inst(std::forward<Arguments>(arguments)...));
+        return m_insts.last();
+    }
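+
+    // Usage sketch (opcode and operands here are hypothetical): phases typically grow a block
+    // with calls like
+    //     block->append(Move, origin, Arg::imm(42), tmp);
+    // which constructs the Inst in place and returns a reference to it for further adjustment.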
+
+    // The "0" case is the case to which the branch jumps, so the "then" case. The "1" case is the
+    // "else" case, and is used to represent the fall-through of a conditional branch.
+    unsigned numSuccessors() const { return m_successors.size(); }
+    FrequentedBlock successor(unsigned index) const { return m_successors[index]; }
+    FrequentedBlock& successor(unsigned index) { return m_successors[index]; }
+    const SuccessorList& successors() const { return m_successors; }
+    SuccessorList& successors() { return m_successors; }
+
+    BasicBlock* successorBlock(unsigned index) const { return successor(index).block(); }
+    BasicBlock*& successorBlock(unsigned index) { return successor(index).block(); }
+    SuccessorCollection<BasicBlock, SuccessorList> successorBlocks()
+    {
+        return SuccessorCollection<BasicBlock, SuccessorList>(m_successors);
+    }
+    SuccessorCollection<const BasicBlock, const SuccessorList> successorBlocks() const
+    {
+        return SuccessorCollection<const BasicBlock, const SuccessorList>(m_successors);
+    }
+
+    unsigned numPredecessors() const { return m_predecessors.size(); }
+    BasicBlock* predecessor(unsigned index) const { return m_predecessors[index]; }
+    BasicBlock*& predecessor(unsigned index) { return m_predecessors[index]; }
+    const PredecessorList& predecessors() const { return m_predecessors; }
+    PredecessorList& predecessors() { return m_predecessors; }
+
+    bool addPredecessor(BasicBlock*);
+    bool removePredecessor(BasicBlock*);
+    bool replacePredecessor(BasicBlock* from, BasicBlock* to);
+    bool containsPredecessor(BasicBlock* predecessor) const { return m_predecessors.contains(predecessor); }
+
+    double frequency() const { return m_frequency; }
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+    void dumpHeader(PrintStream&) const;
+    void dumpFooter(PrintStream&) const;
+
+private:
+    friend class BlockInsertionSet;
+    friend class Code;
+    friend class InsertionSet;
+    
+    BasicBlock(unsigned index, double frequency);
+
+    unsigned m_index;
+    InstList m_insts;
+    SuccessorList m_successors;
+    PredecessorList m_predecessors;
+    double m_frequency;
+};
+
+class DeepBasicBlockDump {
+public:
+    DeepBasicBlockDump(const BasicBlock* block)
+        : m_block(block)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_block)
+            m_block->deepDump(out);
+        else
+            out.print("<null>");
+    }
+
+private:
+    const BasicBlock* m_block;
+};
+
+inline DeepBasicBlockDump deepDump(const BasicBlock* block)
+{
+    return DeepBasicBlockDump(block);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirBlockWorklist.h b/b3/air/AirBlockWorklist.h
new file mode 100644
index 0000000..ba231a9
--- /dev/null
+++ b/b3/air/AirBlockWorklist.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "B3BlockWorklist.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+typedef GraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> BlockWorklist;
+
+// When you say BlockWith<int> you should read it as "block with an int".
+template<typename T> using BlockWith = GraphNodeWith<BasicBlock*, T>;
+
+// Extended block worklist is useful for enqueueing some meta-data along with the block. It also
+// permits forcibly enqueueing things even if the block has already been seen. It's useful for
+// things like building a spanning tree, in which case T (the auxiliary payload) would be the
+// successor index.
+template<typename T> using ExtendedBlockWorklist = ExtendedGraphNodeWorklist<BasicBlock*, T, IndexSet<BasicBlock>>;
+
+typedef GraphNodeWithOrder<BasicBlock*> BlockWithOrder;
+
+typedef PostOrderGraphNodeWorklist<BasicBlock*, IndexSet<BasicBlock>> PostOrderBlockWorklist;
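+
+// Usage sketch (illustrative): BlockWorklist suits forward traversals -- push the entry block,
+// then while (BasicBlock* block = worklist.pop()) push each of block->successorBlocks(). The
+// IndexSet-backed seen set guarantees that every block is enqueued at most once.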
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirCCallSpecial.cpp b/b3/air/AirCCallSpecial.cpp
new file mode 100644
index 0000000..f1b6d71
--- /dev/null
+++ b/b3/air/AirCCallSpecial.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCCallSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+CCallSpecial::CCallSpecial()
+{
+    m_clobberedRegs = RegisterSet::allRegisters();
+    m_clobberedRegs.exclude(RegisterSet::stackRegisters());
+    m_clobberedRegs.exclude(RegisterSet::reservedHardwareRegisters());
+    m_clobberedRegs.exclude(RegisterSet::calleeSaveRegisters());
+    m_clobberedRegs.clear(GPRInfo::returnValueGPR);
+    m_clobberedRegs.clear(GPRInfo::returnValueGPR2);
+    m_clobberedRegs.clear(FPRInfo::returnValueFPR);
+}
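+
+// In other words (a summary): the clobber set starts from all registers and removes the stack and
+// reserved registers (never allocated by Air), the callee-save registers (preserved by the
+// callee), and the return-value registers, which this special instead models as explicit Defs in
+// forEachArg().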
+
+CCallSpecial::~CCallSpecial()
+{
+}
+
+void CCallSpecial::forEachArg(Inst& inst, const ScopedLambda<Inst::EachArgCallback>& callback)
+{
+    for (unsigned i = 0; i < numCalleeArgs; ++i)
+        callback(inst.args[calleeArgOffset + i], Arg::Use, Arg::GP, Arg::pointerWidth());
+    for (unsigned i = 0; i < numReturnGPArgs; ++i)
+        callback(inst.args[returnGPArgOffset + i], Arg::Def, Arg::GP, Arg::pointerWidth());
+    for (unsigned i = 0; i < numReturnFPArgs; ++i)
+        callback(inst.args[returnFPArgOffset + i], Arg::Def, Arg::FP, Arg::Width64);
+    
+    for (unsigned i = argArgOffset; i < inst.args.size(); ++i) {
+        // For the type, we can just query the arg's type. The arg will have a type, because we
+        // require these args to be argument registers.
+        Arg::Type type = inst.args[i].type();
+        callback(inst.args[i], Arg::Use, type, Arg::conservativeWidth(type));
+    }
+}
+
+bool CCallSpecial::isValid(Inst& inst)
+{
+    if (inst.args.size() < argArgOffset)
+        return false;
+
+    for (unsigned i = 0; i < numCalleeArgs; ++i) {
+        Arg& arg = inst.args[i + calleeArgOffset];
+        if (!arg.isGP())
+            return false;
+        switch (arg.kind()) {
+        case Arg::Imm:
+            if (is32Bit())
+                break;
+            return false;
+        case Arg::BigImm:
+            if (is64Bit())
+                break;
+            return false;
+        case Arg::Tmp:
+        case Arg::Addr:
+        case Arg::Stack:
+        case Arg::CallArg:
+            break;
+        default:
+            return false;
+        }
+    }
+
+    // Return args need to be exact.
+    if (inst.args[returnGPArgOffset + 0] != Tmp(GPRInfo::returnValueGPR))
+        return false;
+    if (inst.args[returnGPArgOffset + 1] != Tmp(GPRInfo::returnValueGPR2))
+        return false;
+    if (inst.args[returnFPArgOffset + 0] != Tmp(FPRInfo::returnValueFPR))
+        return false;
+
+    for (unsigned i = argArgOffset; i < inst.args.size(); ++i) {
+        if (!inst.args[i].isReg())
+            return false;
+
+        if (inst.args[i] == Tmp(scratchRegister))
+            return false;
+    }
+    return true;
+}
+
+bool CCallSpecial::admitsStack(Inst&, unsigned argIndex)
+{
+    // The callee can be on the stack.
+    if (argIndex == calleeArgOffset)
+        return true;
+    
+    return false;
+}
+
+void CCallSpecial::reportUsedRegisters(Inst&, const RegisterSet&)
+{
+}
+
+CCallHelpers::Jump CCallSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext&)
+{
+    switch (inst.args[calleeArgOffset].kind()) {
+    case Arg::Imm:
+    case Arg::BigImm:
+        jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
+        jit.call(scratchRegister);
+        break;
+    case Arg::Tmp:
+        jit.call(inst.args[calleeArgOffset].gpr());
+        break;
+    case Arg::Addr:
+        jit.call(inst.args[calleeArgOffset].asAddress());
+        break;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+    return CCallHelpers::Jump();
+}
+
+RegisterSet CCallSpecial::extraEarlyClobberedRegs(Inst&)
+{
+    return m_emptyRegs;
+}
+
+RegisterSet CCallSpecial::extraClobberedRegs(Inst&)
+{
+    return m_clobberedRegs;
+}
+
+void CCallSpecial::dumpImpl(PrintStream& out) const
+{
+    out.print("CCall");
+}
+
+void CCallSpecial::deepDumpImpl(PrintStream& out) const
+{
+    out.print("function call that uses the C calling convention.");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirCCallSpecial.h b/b3/air/AirCCallSpecial.h
new file mode 100644
index 0000000..ec909b9
--- /dev/null
+++ b/b3/air/AirCCallSpecial.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirSpecial.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+// Use this special for constructing a C call. Arg 0 is of course a Special arg that refers to the
+// CCallSpecial object. Arg 1 is the callee, and it can be an ImmPtr, a register, or an address. The
+// next three args - arg 2, arg 3, and arg 4 - hold the return value GPRs and FPR. The remaining args
+// are just the set of argument registers used by this call. For arguments that go to the stack, you
+// have to do the grunt work of doing those stack stores. In fact, the only reason why we specify the
+// argument registers as arguments to a call is so that the liveness analysis can see that they get
+// used here. It would be wrong to automagically report all argument registers as being used because
+// if we had a call that didn't pass them, then they'd appear to be live until some clobber point or
+// the prologue, whichever happened sooner.
+
+class CCallSpecial : public Special {
+public:
+    CCallSpecial();
+    ~CCallSpecial();
+
+    // You cannot use this register to pass arguments. It just so happens that this register is not
+    // used for arguments in the C calling convention. By the way, this is the only thing that causes
+    // this special to be specific to C calls.
+    static const GPRReg scratchRegister = GPRInfo::nonArgGPR0;
+
+protected:
+    void forEachArg(Inst&, const ScopedLambda<Inst::EachArgCallback>&) override;
+    bool isValid(Inst&) override;
+    bool admitsStack(Inst&, unsigned argIndex) override;
+    void reportUsedRegisters(Inst&, const RegisterSet&) override;
+    CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&) override;
+    RegisterSet extraEarlyClobberedRegs(Inst&) override;
+    RegisterSet extraClobberedRegs(Inst&) override;
+
+    void dumpImpl(PrintStream&) const override;
+    void deepDumpImpl(PrintStream&) const override;
+
+private:
+    static const unsigned specialArgOffset = 0;
+    static const unsigned numSpecialArgs = 1;
+    static const unsigned calleeArgOffset = numSpecialArgs;
+    static const unsigned numCalleeArgs = 1;
+    static const unsigned returnGPArgOffset = numSpecialArgs + numCalleeArgs;
+    static const unsigned numReturnGPArgs = 2;
+    static const unsigned returnFPArgOffset = numSpecialArgs + numCalleeArgs + numReturnGPArgs;
+    static const unsigned numReturnFPArgs = 1;
+    static const unsigned argArgOffset =
+        numSpecialArgs + numCalleeArgs + numReturnGPArgs + numReturnFPArgs;
+    
+    RegisterSet m_clobberedRegs;
+    RegisterSet m_emptyRegs;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
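
For orientation, the argument layout that the offsets above encode can be spelled out concretely. The following is a sketch only, not code from the patch: it mirrors what buildCCall() in AirCCallingConvention.cpp does, and the names code, origin, calleeTmp, and argTmp are assumed to be the Air::Code being built, a CCallValue origin, and two GP tmps.

    Inst patch(Patch, origin, Arg::special(code.cCallSpecial())); // arg 0: the Special itself
    patch.args.append(calleeTmp);                                 // arg 1: the callee
    patch.args.append(Tmp(GPRInfo::returnValueGPR));              // arg 2: first GP return register
    patch.args.append(Tmp(GPRInfo::returnValueGPR2));             // arg 3: second GP return register
    patch.args.append(Tmp(FPRInfo::returnValueFPR));              // arg 4: FP return register
    patch.args.append(argTmp);                                    // args 5 and up: argument registers this call uses
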
diff --git a/b3/air/AirCCallingConvention.cpp b/b3/air/AirCCallingConvention.cpp
new file mode 100644
index 0000000..2b6f733
--- /dev/null
+++ b/b3/air/AirCCallingConvention.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCCallingConvention.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "AirCode.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+template<typename BankInfo>
+Arg marshallCCallArgumentImpl(unsigned& argumentCount, unsigned& stackOffset, Value* child)
+{
+    unsigned argumentIndex = argumentCount++;
+    if (argumentIndex < BankInfo::numberOfArgumentRegisters)
+        return Tmp(BankInfo::toArgumentRegister(argumentIndex));
+
+    unsigned slotSize;
+    if (isARM64() && isIOS()) {
+        // Arguments are packed.
+        slotSize = sizeofType(child->type());
+    } else {
+        // Arguments are aligned.
+        slotSize = 8;
+    }
+
+    stackOffset = WTF::roundUpToMultipleOf(slotSize, stackOffset);
+    Arg result = Arg::callArg(stackOffset);
+    stackOffset += slotSize;
+    return result;
+}
+
+Arg marshallCCallArgument(
+    unsigned& gpArgumentCount, unsigned& fpArgumentCount, unsigned& stackOffset, Value* child)
+{
+    switch (Arg::typeForB3Type(child->type())) {
+    case Arg::GP:
+        return marshallCCallArgumentImpl<GPRInfo>(gpArgumentCount, stackOffset, child);
+    case Arg::FP:
+        return marshallCCallArgumentImpl<FPRInfo>(fpArgumentCount, stackOffset, child);
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+    return Arg();
+}
+
+} // anonymous namespace
+
+Vector<Arg> computeCCallingConvention(Code& code, CCallValue* value)
+{
+    Vector<Arg> result;
+    result.append(Tmp(CCallSpecial::scratchRegister));
+    unsigned gpArgumentCount = 0;
+    unsigned fpArgumentCount = 0;
+    unsigned stackOffset = 0;
+    for (unsigned i = 1; i < value->numChildren(); ++i) {
+        result.append(
+            marshallCCallArgument(gpArgumentCount, fpArgumentCount, stackOffset, value->child(i)));
+    }
+    code.requestCallArgAreaSizeInBytes(WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset));
+    return result;
+}
+
+Tmp cCallResult(Type type)
+{
+    switch (type) {
+    case Void:
+        return Tmp();
+    case Int32:
+    case Int64:
+        return Tmp(GPRInfo::returnValueGPR);
+    case Float:
+    case Double:
+        return Tmp(FPRInfo::returnValueFPR);
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return Tmp();
+}
+
+Inst buildCCall(Code& code, Value* origin, const Vector<Arg>& arguments)
+{
+    Inst inst(Patch, origin, Arg::special(code.cCallSpecial()));
+    inst.args.append(arguments[0]);
+    inst.args.append(Tmp(GPRInfo::returnValueGPR));
+    inst.args.append(Tmp(GPRInfo::returnValueGPR2));
+    inst.args.append(Tmp(FPRInfo::returnValueFPR));
+    for (unsigned i = 1; i < arguments.size(); ++i) {
+        Arg arg = arguments[i];
+        if (arg.isTmp())
+            inst.args.append(arg);
+    }
+    return inst;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
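
A worked example of the stack-slot logic in marshallCCallArgumentImpl() above may help. This is a sketch under the assumption that all argument registers are already taken and ignoring the GP/FP split: with aligned 8-byte slots, two spilled Int32 arguments land at call-arg offsets 0 and 8, while with packed arguments (ARM64 iOS) they land at 0 and 4, because each slot is only rounded to the argument's own size.

    unsigned stackOffset = 0;
    auto nextSlot = [&] (unsigned valueSize, bool packed) {
        unsigned slotSize = packed ? valueSize : 8;               // packed on ARM64 iOS, 8-byte aligned elsewhere
        stackOffset = WTF::roundUpToMultipleOf(slotSize, stackOffset);
        unsigned result = stackOffset;
        stackOffset += slotSize;
        return result;
    };
    // Starting from stackOffset = 0 in each mode:
    // aligned: nextSlot(4, false) == 0, then nextSlot(4, false) == 8
    // packed:  nextSlot(4, true)  == 0, then nextSlot(4, true)  == 4
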
diff --git a/b3/air/AirCCallingConvention.h b/b3/air/AirCCallingConvention.h
new file mode 100644
index 0000000..76acc29
--- /dev/null
+++ b/b3/air/AirCCallingConvention.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirInst.h"
+#include "B3Type.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class CCallValue;
+
+namespace Air {
+
+class Code;
+
+Vector<Arg> computeCCallingConvention(Code&, CCallValue*);
+
+Tmp cCallResult(Type);
+
+Inst buildCCall(Code&, Value* origin, const Vector<Arg>&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirCode.cpp b/b3/air/AirCode.cpp
new file mode 100644
index 0000000..79e2c0c
--- /dev/null
+++ b/b3/air/AirCode.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCode.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallSpecial.h"
+#include "B3BasicBlockUtils.h"
+#include "B3Procedure.h"
+#include "B3StackSlot.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+Code::Code(Procedure& proc)
+    : m_proc(proc)
+    , m_lastPhaseName("initial")
+{
+    // Come up with initial orderings of registers. The user may replace this with something else.
+    Arg::forEachType(
+        [&] (Arg::Type type) {
+            Vector<Reg> result;
+            RegisterSet all = type == Arg::GP ? RegisterSet::allGPRs() : RegisterSet::allFPRs();
+            all.exclude(RegisterSet::stackRegisters());
+            all.exclude(RegisterSet::reservedHardwareRegisters());
+            RegisterSet calleeSave = RegisterSet::calleeSaveRegisters();
+            all.forEach(
+                [&] (Reg reg) {
+                    if (!calleeSave.get(reg))
+                        result.append(reg);
+                });
+            all.forEach(
+                [&] (Reg reg) {
+                    if (calleeSave.get(reg))
+                        result.append(reg);
+                });
+            setRegsInPriorityOrder(type, result);
+        });
+}
+
+Code::~Code()
+{
+}
+
+void Code::setRegsInPriorityOrder(Arg::Type type, const Vector<Reg>& regs)
+{
+    regsInPriorityOrderImpl(type) = regs;
+    m_mutableRegs = RegisterSet();
+    Arg::forEachType(
+        [&] (Arg::Type type) {
+            for (Reg reg : regsInPriorityOrder(type))
+                m_mutableRegs.set(reg);
+        });
+}
+
+void Code::pinRegister(Reg reg)
+{
+    Vector<Reg>& regs = regsInPriorityOrderImpl(Arg(Tmp(reg)).type());
+    regs.removeFirst(reg);
+    m_mutableRegs.clear(reg);
+    ASSERT(!regs.contains(reg));
+}
+
+BasicBlock* Code::addBlock(double frequency)
+{
+    std::unique_ptr<BasicBlock> block(new BasicBlock(m_blocks.size(), frequency));
+    BasicBlock* result = block.get();
+    m_blocks.append(WTFMove(block));
+    return result;
+}
+
+StackSlot* Code::addStackSlot(unsigned byteSize, StackSlotKind kind, B3::StackSlot* b3Slot)
+{
+    return m_stackSlots.addNew(byteSize, kind, b3Slot);
+}
+
+StackSlot* Code::addStackSlot(B3::StackSlot* b3Slot)
+{
+    return addStackSlot(b3Slot->byteSize(), StackSlotKind::Locked, b3Slot);
+}
+
+Special* Code::addSpecial(std::unique_ptr<Special> special)
+{
+    special->m_code = this;
+    return m_specials.add(WTFMove(special));
+}
+
+CCallSpecial* Code::cCallSpecial()
+{
+    if (!m_cCallSpecial) {
+        m_cCallSpecial = static_cast<CCallSpecial*>(
+            addSpecial(std::make_unique<CCallSpecial>()));
+    }
+
+    return m_cCallSpecial;
+}
+
+bool Code::isEntrypoint(BasicBlock* block) const
+{
+    if (m_entrypoints.isEmpty())
+        return !block->index();
+    
+    for (const FrequentedBlock& entrypoint : m_entrypoints) {
+        if (entrypoint.block() == block)
+            return true;
+    }
+    return false;
+}
+
+void Code::resetReachability()
+{
+    clearPredecessors(m_blocks);
+    if (m_entrypoints.isEmpty())
+        updatePredecessorsAfter(m_blocks[0].get());
+    else {
+        for (const FrequentedBlock& entrypoint : m_entrypoints)
+            updatePredecessorsAfter(entrypoint.block());
+    }
+    
+    for (auto& block : m_blocks) {
+        if (isBlockDead(block.get()) && !isEntrypoint(block.get()))
+            block = nullptr;
+    }
+}
+
+void Code::dump(PrintStream& out) const
+{
+    if (!m_entrypoints.isEmpty())
+        out.print("Entrypoints: ", listDump(m_entrypoints), "\n");
+    for (BasicBlock* block : *this)
+        out.print(deepDump(block));
+    if (stackSlots().size()) {
+        out.print("Stack slots:\n");
+        for (StackSlot* slot : stackSlots())
+            out.print("    ", pointerDump(slot), ": ", deepDump(slot), "\n");
+    }
+    if (specials().size()) {
+        out.print("Specials:\n");
+        for (Special* special : specials())
+            out.print("    ", deepDump(special), "\n");
+    }
+    if (m_frameSize)
+        out.print("Frame size: ", m_frameSize, "\n");
+    if (m_callArgAreaSize)
+        out.print("Call arg area size: ", m_callArgAreaSize, "\n");
+    if (m_calleeSaveRegisters.size())
+        out.print("Callee saves: ", m_calleeSaveRegisters, "\n");
+}
+
+unsigned Code::findFirstBlockIndex(unsigned index) const
+{
+    while (index < size() && !at(index))
+        index++;
+    return index;
+}
+
+unsigned Code::findNextBlockIndex(unsigned index) const
+{
+    return findFirstBlockIndex(index + 1);
+}
+
+BasicBlock* Code::findNextBlock(BasicBlock* block) const
+{
+    unsigned index = findNextBlockIndex(block->index());
+    if (index < size())
+        return at(index);
+    return nullptr;
+}
+
+void Code::addFastTmp(Tmp tmp)
+{
+    m_fastTmps.add(tmp);
+}
+
+void* Code::addDataSection(size_t size)
+{
+    return m_proc.addDataSection(size);
+}
+
+unsigned Code::jsHash() const
+{
+    unsigned result = 0;
+    
+    for (BasicBlock* block : *this) {
+        result *= 1000001;
+        for (Inst& inst : *block) {
+            result *= 97;
+            result += inst.jsHash();
+        }
+        for (BasicBlock* successor : block->successorBlocks()) {
+            result *= 7;
+            result += successor->index();
+        }
+    }
+    for (StackSlot* slot : stackSlots()) {
+        result *= 101;
+        result += slot->jsHash();
+    }
+    
+    return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
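
One thing worth calling out from the constructor and pinRegister() above: a client that needs to reserve a register removes it from the priority order, which also clears it from mutableRegs(), so later phases treat it as pinned. A hypothetical usage sketch follows; the choice of GPRInfo::regCS1 is illustrative, not from the patch.

    Air::Code& code = procedure.code();          // assumes an existing B3::Procedure
    code.pinRegister(Reg(GPRInfo::regCS1));      // never allocate or mutate this register
    ASSERT(code.isPinned(Reg(GPRInfo::regCS1)));
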
diff --git a/b3/air/AirCode.h b/b3/air/AirCode.h
new file mode 100644
index 0000000..6d4a147
--- /dev/null
+++ b/b3/air/AirCode.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirBasicBlock.h"
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "AirTmp.h"
+#include "B3SparseCollection.h"
+#include "CCallHelpers.h"
+#include "RegisterAtOffsetList.h"
+#include "StackAlignment.h"
+#include 
+
+namespace JSC { namespace B3 {
+
+class Procedure;
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wreturn-type"
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+namespace Air {
+
+class BlockInsertionSet;
+class CCallSpecial;
+
+typedef void WasmBoundsCheckGeneratorFunction(CCallHelpers&, GPRReg, unsigned);
+typedef SharedTask<WasmBoundsCheckGeneratorFunction> WasmBoundsCheckGenerator;
+
+// This is an IR that is very close to the bare metal. It requires about 40x more bytes than the
+// generated machine code - for example if you're generating 1MB of machine code, you need about
+// 40MB of Air.
+
+class Code {
+    WTF_MAKE_NONCOPYABLE(Code);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    ~Code();
+
+    Procedure& proc() { return m_proc; }
+    
+    const Vector<Reg>& regsInPriorityOrder(Arg::Type type) const
+    {
+        switch (type) {
+        case Arg::GP:
+            return m_gpRegsInPriorityOrder;
+        case Arg::FP:
+            return m_fpRegsInPriorityOrder;
+        }
+        ASSERT_NOT_REACHED();
+    }
+    
+    void setRegsInPriorityOrder(Arg::Type, const Vector<Reg>&);
+    
+    // This is the set of registers that Air is allowed to emit code to mutate. It's derived from
+    // regsInPriorityOrder. Any registers not in this set are said to be "pinned".
+    const RegisterSet& mutableRegs() const { return m_mutableRegs; }
+    
+    bool isPinned(Reg reg) const { return !mutableRegs().get(reg); }
+    
+    void pinRegister(Reg);
+
+    JS_EXPORT_PRIVATE BasicBlock* addBlock(double frequency = 1);
+
+    // Note that you can rely on stack slots always getting indices that are larger than the index
+    // of any prior stack slot. In fact, all stack slots you create in the future will have an index
+    // that is >= stackSlots().size().
+    JS_EXPORT_PRIVATE StackSlot* addStackSlot(
+        unsigned byteSize, StackSlotKind, B3::StackSlot* = nullptr);
+    StackSlot* addStackSlot(B3::StackSlot*);
+
+    Special* addSpecial(std::unique_ptr<Special>);
+
+    // This is the special you need to make a C call!
+    CCallSpecial* cCallSpecial();
+
+    Tmp newTmp(Arg::Type type)
+    {
+        switch (type) {
+        case Arg::GP:
+            return Tmp::gpTmpForIndex(m_numGPTmps++);
+        case Arg::FP:
+            return Tmp::fpTmpForIndex(m_numFPTmps++);
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    unsigned numTmps(Arg::Type type)
+    {
+        switch (type) {
+        case Arg::GP:
+            return m_numGPTmps;
+        case Arg::FP:
+            return m_numFPTmps;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    unsigned callArgAreaSizeInBytes() const { return m_callArgAreaSize; }
+
+    // You can call this before code generation to force a minimum call arg area size.
+    void requestCallArgAreaSizeInBytes(unsigned size)
+    {
+        m_callArgAreaSize = std::max(
+            m_callArgAreaSize,
+            static_cast<unsigned>(WTF::roundUpToMultipleOf(stackAlignmentBytes(), size)));
+    }
+
+    unsigned frameSize() const { return m_frameSize; }
+
+    // Only phases that do stack allocation are allowed to set this. Currently, only
+    // Air::allocateStack() does this.
+    void setFrameSize(unsigned frameSize)
+    {
+        m_frameSize = frameSize;
+    }
+
+    // Note that this is not the same thing as proc().numEntrypoints(). This value here may be zero
+    // until we lower EntrySwitch.
+    unsigned numEntrypoints() const { return m_entrypoints.size(); }
+    const Vector<FrequentedBlock>& entrypoints() const { return m_entrypoints; }
+    const FrequentedBlock& entrypoint(unsigned index) const { return m_entrypoints[index]; }
+    bool isEntrypoint(BasicBlock*) const;
+    
+    // This is used by lowerEntrySwitch().
+    template<typename Vector>
+    void setEntrypoints(Vector&& vector)
+    {
+        m_entrypoints = std::forward<Vector>(vector);
+    }
+    
+    CCallHelpers::Label entrypointLabel(unsigned index) const
+    {
+        return m_entrypointLabels[index];
+    }
+    
+    // This is used by generate().
+    template<typename Vector>
+    void setEntrypointLabels(Vector&& vector)
+    {
+        m_entrypointLabels = std::forward<Vector>(vector);
+    }
+
+    const RegisterAtOffsetList& calleeSaveRegisters() const { return m_calleeSaveRegisters; }
+    RegisterAtOffsetList& calleeSaveRegisters() { return m_calleeSaveRegisters; }
+
+    // Recomputes predecessors and deletes unreachable blocks.
+    void resetReachability();
+
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    unsigned size() const { return m_blocks.size(); }
+    BasicBlock* at(unsigned index) const { return m_blocks[index].get(); }
+    BasicBlock* operator[](unsigned index) const { return at(index); }
+
+    // This is used by phases that optimize the block list. You shouldn't use this unless you really know
+    // what you're doing.
+    Vector<std::unique_ptr<BasicBlock>>& blockList() { return m_blocks; }
+
+    // Finds the smallest index' such that at(index') != null and index' >= index.
+    JS_EXPORT_PRIVATE unsigned findFirstBlockIndex(unsigned index) const;
+
+    // Finds the smallest index' such that at(index') != null and index' > index.
+    unsigned findNextBlockIndex(unsigned index) const;
+
+    BasicBlock* findNextBlock(BasicBlock*) const;
+
+    class iterator {
+    public:
+        iterator()
+            : m_code(nullptr)
+            , m_index(0)
+        {
+        }
+
+        iterator(const Code& code, unsigned index)
+            : m_code(&code)
+            , m_index(m_code->findFirstBlockIndex(index))
+        {
+        }
+
+        BasicBlock* operator*()
+        {
+            return m_code->at(m_index);
+        }
+
+        iterator& operator++()
+        {
+            m_index = m_code->findFirstBlockIndex(m_index + 1);
+            return *this;
+        }
+
+        bool operator==(const iterator& other) const
+        {
+            return m_index == other.m_index;
+        }
+
+        bool operator!=(const iterator& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        const Code* m_code;
+        unsigned m_index;
+    };
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+
+    const SparseCollection<StackSlot>& stackSlots() const { return m_stackSlots; }
+    SparseCollection<StackSlot>& stackSlots() { return m_stackSlots; }
+
+    const SparseCollection<Special>& specials() const { return m_specials; }
+    SparseCollection<Special>& specials() { return m_specials; }
+
+    template<typename Callback>
+    void forAllTmps(const Callback& callback) const
+    {
+        for (unsigned i = m_numGPTmps; i--;)
+            callback(Tmp::gpTmpForIndex(i));
+        for (unsigned i = m_numFPTmps; i--;)
+            callback(Tmp::fpTmpForIndex(i));
+    }
+
+    void addFastTmp(Tmp);
+    bool isFastTmp(Tmp tmp) const { return m_fastTmps.contains(tmp); }
+    
+    void* addDataSection(size_t);
+    
+    // The name has to be a string literal, since we don't do any memory management for the string.
+    void setLastPhaseName(const char* name)
+    {
+        m_lastPhaseName = name;
+    }
+
+    const char* lastPhaseName() const { return m_lastPhaseName; }
+
+    void setWasmBoundsCheckGenerator(RefPtr<WasmBoundsCheckGenerator> generator)
+    {
+        m_wasmBoundsCheckGenerator = generator;
+    }
+
+    RefPtr<WasmBoundsCheckGenerator> wasmBoundsCheckGenerator() const { return m_wasmBoundsCheckGenerator; }
+
+    // This is a hash of the code. You can use this if you want to put code into a hashtable, but
+    // it's mainly for validating the results from JSAir.
+    unsigned jsHash() const;
+
+private:
+    friend class ::JSC::B3::Procedure;
+    friend class BlockInsertionSet;
+    
+    Code(Procedure&);
+
+    Vector<Reg>& regsInPriorityOrderImpl(Arg::Type type)
+    {
+        switch (type) {
+        case Arg::GP:
+            return m_gpRegsInPriorityOrder;
+        case Arg::FP:
+            return m_fpRegsInPriorityOrder;
+        }
+        ASSERT_NOT_REACHED();
+    }
+
+    Procedure& m_proc; // Some meta-data, like byproducts, is stored in the Procedure.
+    Vector<Reg> m_gpRegsInPriorityOrder;
+    Vector<Reg> m_fpRegsInPriorityOrder;
+    RegisterSet m_mutableRegs;
+    SparseCollection<StackSlot> m_stackSlots;
+    Vector<std::unique_ptr<BasicBlock>> m_blocks;
+    SparseCollection<Special> m_specials;
+    HashSet<Tmp> m_fastTmps;
+    CCallSpecial* m_cCallSpecial { nullptr };
+    unsigned m_numGPTmps { 0 };
+    unsigned m_numFPTmps { 0 };
+    unsigned m_frameSize { 0 };
+    unsigned m_callArgAreaSize { 0 };
+    RegisterAtOffsetList m_calleeSaveRegisters;
+    Vector<FrequentedBlock> m_entrypoints; // This is empty until after lowerEntrySwitch().
+    Vector<CCallHelpers::Label> m_entrypointLabels; // This is empty until code generation.
+    RefPtr<WasmBoundsCheckGenerator> m_wasmBoundsCheckGenerator;
+    const char* m_lastPhaseName;
+};
+
+} } } // namespace JSC::B3::Air
+
+#if COMPILER(GCC) && ASSERT_DISABLED
+#pragma GCC diagnostic pop
+#endif // COMPILER(GCC) && ASSERT_DISABLED
+
+#endif // ENABLE(B3_JIT)
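
Since resetReachability() nulls out dead blocks rather than compacting blockList(), the iterator above exists so that range-based iteration never sees the holes. A small usage sketch, assuming code is an Air::Code&:

    for (Air::BasicBlock* block : code)          // skips null entries automatically
        dataLog("block ", block->index(), " has ", block->size(), " insts\n");

    for (unsigned i = 0; i < code.size(); ++i) { // index-based loops must skip holes by hand
        if (Air::BasicBlock* block = code.at(i))
            dataLog("live block at index ", i, "\n");
    }
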
diff --git a/b3/air/AirCustom.cpp b/b3/air/AirCustom.cpp
new file mode 100644
index 0000000..2a2df2f
--- /dev/null
+++ b/b3/air/AirCustom.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirCustom.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirInstInlines.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool PatchCustom::isValidForm(Inst& inst)
+{
+    if (inst.args.size() < 1)
+        return false;
+    if (!inst.args[0].isSpecial())
+        return false;
+    if (!inst.args[0].special()->isValid(inst))
+        return false;
+    RegisterSet clobberedEarly = inst.extraEarlyClobberedRegs();
+    RegisterSet clobberedLate = inst.extraClobberedRegs();
+    bool ok = true;
+    inst.forEachTmp(
+        [&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+            if (!tmp.isReg())
+                return;
+            if (Arg::isLateDef(role) || Arg::isLateUse(role))
+                ok &= !clobberedLate.get(tmp.reg());
+            else
+                ok &= !clobberedEarly.get(tmp.reg());
+        });
+    return ok;
+}
+
+bool CCallCustom::isValidForm(Inst& inst)
+{
+    CCallValue* value = inst.origin->as<CCallValue>();
+    if (!value)
+        return false;
+
+    if (inst.args.size() != (value->type() == Void ? 0 : 1) + value->numChildren())
+        return false;
+
+    // The arguments can only refer to the stack, tmps, or immediates.
+    for (Arg& arg : inst.args) {
+        if (!arg.isTmp() && !arg.isStackMemory() && !arg.isSomeImm())
+            return false;
+    }
+
+    unsigned offset = 0;
+
+    if (!inst.args[0].isGP())
+        return false;
+
+    // If there is a result then it cannot be an immediate.
+    if (value->type() != Void) {
+        if (inst.args[1].isSomeImm())
+            return false;
+        if (!inst.args[1].canRepresent(value))
+            return false;
+        offset++;
+    }
+
+    for (unsigned i = value->numChildren(); i-- > 1;) {
+        Value* child = value->child(i);
+        Arg arg = inst.args[offset + i];
+        if (!arg.canRepresent(child))
+            return false;
+    }
+
+    return true;
+}
+
+CCallHelpers::Jump CCallCustom::generate(Inst& inst, CCallHelpers&, GenerationContext&)
+{
+    dataLog("FATAL: Unlowered C call: ", inst, "\n");
+    UNREACHABLE_FOR_PLATFORM();
+    return CCallHelpers::Jump();
+}
+
+bool ShuffleCustom::isValidForm(Inst& inst)
+{
+    if (inst.args.size() % 3)
+        return false;
+
+    // A destination may only appear once. This requirement allows us to avoid the undefined behavior
+    // of having a destination that is supposed to get multiple inputs simultaneously. It also
+    // imposes some interesting constraints on the "shape" of the shuffle. If we treat a shuffle pair
+    // as an edge and the Args as nodes, then the single-destination requirement means that the
+    // shuffle graph consists of two kinds of subgraphs:
+    //
+    // - Spanning trees. We call these shifts. They can be executed as a sequence of Move
+    //   instructions and don't usually require scratch registers.
+    //
+    // - Closed loops. These loops consist of nodes that have one successor and one predecessor, so
+    //   there is no way to "get into" the loop from outside of it. These can be executed using swaps
+    //   or by saving one of the Args to a scratch register and executing it as a shift.
+    HashSet<Arg> dsts;
+
+    for (unsigned i = 0; i < inst.args.size(); ++i) {
+        Arg arg = inst.args[i];
+        unsigned mode = i % 3;
+
+        if (mode == 2) {
+            // It's the width.
+            if (!arg.isWidthArg())
+                return false;
+            continue;
+        }
+
+        // The source can be an immediate.
+        if (!mode) {
+            if (arg.isSomeImm())
+                continue;
+
+            if (!arg.isCompatibleType(inst.args[i + 1]))
+                return false;
+        } else {
+            ASSERT(mode == 1);
+            if (!dsts.add(arg).isNewEntry)
+                return false;
+        }
+
+        if (arg.isTmp() || arg.isMemory())
+            continue;
+
+        return false;
+    }
+
+    // No destination register may appear in any address expressions. The lowering can't handle it
+    // and it's not useful for the way we end up using Shuffles. Normally, Shuffles are only used
+    // for stack addresses and non-stack registers.
+    for (Arg& arg : inst.args) {
+        if (!arg.isMemory())
+            continue;
+        bool ok = true;
+        arg.forEachTmpFast(
+            [&] (Tmp tmp) {
+                if (dsts.contains(tmp))
+                    ok = false;
+            });
+        if (!ok)
+            return false;
+    }
+
+    return true;
+}
+
+CCallHelpers::Jump ShuffleCustom::generate(Inst& inst, CCallHelpers&, GenerationContext&)
+{
+    dataLog("FATAL: Unlowered shuffle: ", inst, "\n");
+    UNREACHABLE_FOR_PLATFORM();
+    return CCallHelpers::Jump();
+}
+
+bool WasmBoundsCheckCustom::isValidForm(Inst& inst)
+{
+    if (inst.args.size() != 2)
+        return false;
+    if (!inst.args[0].isTmp() && !inst.args[0].isSomeImm())
+        return false;
+
+    return inst.args[1].isReg();
+}
+
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
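
To make the shift/cycle distinction in the comment above concrete, here is a sketch of building a Shuffle whose graph is a single two-element cycle; tmp1, tmp2, and origin are hypothetical. Each destination appears exactly once, so isValidForm() accepts it, and the lowering realizes the cycle with a swap or by saving one side to a scratch.

    Inst shuffle(Shuffle, origin);
    shuffle.args.append(tmp1);                          // source of edge tmp1 -> tmp2
    shuffle.args.append(tmp2);                          // destination (appears once)
    shuffle.args.append(Arg::widthArg(Arg::Width64));
    shuffle.args.append(tmp2);                          // source of edge tmp2 -> tmp1
    shuffle.args.append(tmp1);                          // destination (appears once)
    shuffle.args.append(Arg::widthArg(Arg::Width64));
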
diff --git a/b3/air/AirCustom.h b/b3/air/AirCustom.h
new file mode 100644
index 0000000..644b7a1
--- /dev/null
+++ b/b3/air/AirCustom.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirGenerationContext.h"
+#include "AirInst.h"
+#include "AirSpecial.h"
+#include "B3ValueInlines.h"
+#include "B3WasmBoundsCheckValue.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+// This defines the behavior of custom instructions - i.e. those whose behavior cannot be
+// described using AirOpcode.opcodes. If you define an opcode as "custom Foo" in that file, then
+// you will need to create a "struct FooCustom" here that implements the custom behavior
+// methods.
+//
+// The customizability granted by the custom instruction mechanism is strictly less than what
+// you get using the Patch instruction and implementing a Special. However, that path requires
+// allocating a Special object and ensuring that it's the first operand. For many instructions,
+// that is not as convenient as using Custom, which makes the instruction look like any other
+// instruction. Note that both of those extra powers of the Patch instruction happen because we
+// special-case that instruction in many phases and analyses. Non-special-cased behaviors of
+// Patch are implemented using the custom instruction mechanism.
+//
+// Specials are still more flexible if you need to list extra clobbered registers and you'd like
+// that to be expressed as a bitvector rather than an arglist. They are also more flexible if
+// you need to carry extra state around with the instruction. Also, Specials mean that you
+// always have access to Code& even in methods that don't take a GenerationContext.
+
+// Definition of Patch instruction. Patch is used to delegate the behavior of the instruction to the
+// Special object, which will be the first argument to the instruction.
+struct PatchCustom {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        // This is basically bogus, but it works for analyses that model Special as an
+        // immediate.
+        functor(inst.args[0], Arg::Use, Arg::GP, Arg::pointerWidth());
+        
+        inst.args[0].special()->forEachArg(inst, scopedLambda<Inst::EachArgCallback>(functor));
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst& inst);
+
+    static bool admitsStack(Inst& inst, unsigned argIndex)
+    {
+        if (!argIndex)
+            return false;
+        return inst.args[0].special()->admitsStack(inst, argIndex);
+    }
+
+    static std::optional<unsigned> shouldTryAliasingDef(Inst& inst)
+    {
+        return inst.args[0].special()->shouldTryAliasingDef(inst);
+    }
+    
+    static bool isTerminal(Inst& inst)
+    {
+        return inst.args[0].special()->isTerminal(inst);
+    }
+
+    static bool hasNonArgEffects(Inst& inst)
+    {
+        return inst.args[0].special()->hasNonArgEffects(inst);
+    }
+
+    static bool hasNonArgNonControlEffects(Inst& inst)
+    {
+        return inst.args[0].special()->hasNonArgNonControlEffects(inst);
+    }
+
+    static CCallHelpers::Jump generate(
+        Inst& inst, CCallHelpers& jit, GenerationContext& context)
+    {
+        return inst.args[0].special()->generate(inst, jit, context);
+    }
+};
+
+template<typename Subtype>
+struct CommonCustomBase {
+    static bool hasNonArgEffects(Inst& inst)
+    {
+        return Subtype::isTerminal(inst) || Subtype::hasNonArgNonControlEffects(inst);
+    }
+};
+
+// Definition of CCall instruction. CCall is used for hot path C function calls. It's lowered to a
+// Patch with an Air CCallSpecial along with code to marshal instructions. The lowering happens
+// before register allocation, so that the register allocator sees the clobbers.
+struct CCallCustom : public CommonCustomBase<CCallCustom> {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        Value* value = inst.origin;
+
+        unsigned index = 0;
+
+        functor(inst.args[index++], Arg::Use, Arg::GP, Arg::pointerWidth()); // callee
+        
+        if (value->type() != Void) {
+            functor(
+                inst.args[index++], Arg::Def,
+                Arg::typeForB3Type(value->type()),
+                Arg::widthForB3Type(value->type()));
+        }
+
+        for (unsigned i = 1; i < value->numChildren(); ++i) {
+            Value* child = value->child(i);
+            functor(
+                inst.args[index++], Arg::Use,
+                Arg::typeForB3Type(child->type()),
+                Arg::widthForB3Type(child->type()));
+        }
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst&);
+
+    static bool admitsStack(Inst&, unsigned)
+    {
+        return true;
+    }
+    
+    static bool isTerminal(Inst&)
+    {
+        return false;
+    }
+
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return true;
+    }
+
+    // This just crashes, since we expect C calls to be lowered before generation.
+    static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+};
+
+struct ColdCCallCustom : CCallCustom {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        // This is just like a call, but uses become cold.
+        CCallCustom::forEachArg(
+            inst,
+            [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+                functor(arg, Arg::cooled(role), type, width);
+            });
+    }
+};
+
+struct ShuffleCustom : public CommonCustomBase<ShuffleCustom> {
+    template<typename Functor>
+    static void forEachArg(Inst& inst, const Functor& functor)
+    {
+        unsigned limit = inst.args.size() / 3 * 3;
+        for (unsigned i = 0; i < limit; i += 3) {
+            Arg& src = inst.args[i + 0];
+            Arg& dst = inst.args[i + 1];
+            Arg& widthArg = inst.args[i + 2];
+            Arg::Width width = widthArg.width();
+            Arg::Type type = src.isGP() && dst.isGP() ? Arg::GP : Arg::FP;
+            functor(src, Arg::Use, type, width);
+            functor(dst, Arg::Def, type, width);
+            functor(widthArg, Arg::Use, Arg::GP, Arg::Width8);
+        }
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst&);
+    
+    static bool admitsStack(Inst&, unsigned index)
+    {
+        switch (index % 3) {
+        case 0:
+        case 1:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isTerminal(Inst&)
+    {
+        return false;
+    }
+
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return false;
+    }
+
+    static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&);
+};
+
+struct EntrySwitchCustom : public CommonCustomBase<EntrySwitchCustom> {
+    template<typename Func>
+    static void forEachArg(Inst&, const Func&)
+    {
+    }
+    
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return !sizeof...(Arguments);
+    }
+    
+    static bool isValidForm(Inst& inst)
+    {
+        return inst.args.isEmpty();
+    }
+    
+    static bool admitsStack(Inst&, unsigned)
+    {
+        return false;
+    }
+    
+    static bool isTerminal(Inst&)
+    {
+        return true;
+    }
+    
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return false;
+    }
+
+    static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&)
+    {
+        // This should never be reached because we should have lowered EntrySwitch before
+        // generation.
+        UNREACHABLE_FOR_PLATFORM();
+        return CCallHelpers::Jump();
+    }
+};
+
+struct WasmBoundsCheckCustom : public CommonCustomBase<WasmBoundsCheckCustom> {
+    template<typename Func>
+    static void forEachArg(Inst& inst, const Func& functor)
+    {
+        functor(inst.args[0], Arg::Use, Arg::GP, Arg::Width64);
+        functor(inst.args[1], Arg::Use, Arg::GP, Arg::Width64);
+    }
+
+    template<typename... Arguments>
+    static bool isValidFormStatic(Arguments...)
+    {
+        return false;
+    }
+
+    static bool isValidForm(Inst&);
+
+    static bool admitsStack(Inst&, unsigned)
+    {
+        return false;
+    }
+
+    static bool isTerminal(Inst&)
+    {
+        return false;
+    }
+    
+    static bool hasNonArgNonControlEffects(Inst&)
+    {
+        return true;
+    }
+
+    static CCallHelpers::Jump generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
+    {
+        WasmBoundsCheckValue* value = inst.origin->as<WasmBoundsCheckValue>();
+        CCallHelpers::Jump outOfBounds = Inst(Air::Branch64, value, Arg::relCond(CCallHelpers::AboveOrEqual), inst.args[0], inst.args[1]).generate(jit, context);
+
+        context.latePaths.append(createSharedTask<GenerationContext::LatePathFunction>(
+            [=] (CCallHelpers& jit, Air::GenerationContext&) {
+                outOfBounds.link(&jit);
+                context.code->wasmBoundsCheckGenerator()->run(jit, value->pinnedGPR(), value->offset());
+            }));
+
+        // We said we were not a terminal.
+        return CCallHelpers::Jump();
+    }
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
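
As a template for new opcodes, here is a minimal sketch of the struct that a hypothetical "custom Foo" opcode would add to this header, following the pattern of the structs above; Foo itself does not exist in the patch and its single GP Use arg is an assumption for illustration.

    struct FooCustom : public CommonCustomBase<FooCustom> {
        template<typename Functor>
        static void forEachArg(Inst& inst, const Functor& functor)
        {
            // Report each arg's role/type/width so liveness and register allocation see it.
            functor(inst.args[0], Arg::Use, Arg::GP, Arg::Width64);
        }

        template<typename... Arguments>
        static bool isValidFormStatic(Arguments...) { return false; }

        static bool isValidForm(Inst& inst) { return inst.args.size() == 1; }
        static bool admitsStack(Inst&, unsigned) { return false; }
        static bool isTerminal(Inst&) { return false; }
        static bool hasNonArgNonControlEffects(Inst&) { return false; }

        static CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&)
        {
            return CCallHelpers::Jump(); // emit the instruction's code here
        }
    };
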
diff --git a/b3/air/AirDumpAsJS.cpp b/b3/air/AirDumpAsJS.cpp
new file mode 100644
index 0000000..3d8d6fb
--- /dev/null
+++ b/b3/air/AirDumpAsJS.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirDumpAsJS.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+CString varNameForBlockAtIndex(unsigned index)
+{
+    return toCString("bb", index);
+}
+
+CString varName(BasicBlock* block)
+{
+    return varNameForBlockAtIndex(block->index());
+}
+
+CString varNameForStackSlotAtIndex(unsigned index)
+{
+    return toCString("slot", index);
+}
+
+CString varName(StackSlot* slot)
+{
+    return varNameForStackSlotAtIndex(slot->index());
+}
+
+CString varName(Reg reg)
+{
+    return toCString("Reg.", reg.debugName());
+}
+
+CString varNameForTmpWithTypeAndIndex(Arg::Type type, unsigned index)
+{
+    return toCString(type == Arg::FP ? "f" : "", "tmp", index);
+}
+
+CString varName(Tmp tmp)
+{
+    if (tmp.isReg())
+        return varName(tmp.reg());
+    return varNameForTmpWithTypeAndIndex(Arg(tmp).type(), tmp.tmpIndex());
+}
+
+} // anonymous namespace
+
+void dumpAsJS(Code& code, PrintStream& out)
+{
+    out.println("let code = new Code();");
+    
+    for (unsigned i = 0; i < code.size(); ++i)
+        out.println("let ", varNameForBlockAtIndex(i), " = code.addBlock();");
+    
+    out.println("let hash;");
+
+    for (unsigned i = 0; i < code.stackSlots().size(); ++i) {
+        StackSlot* slot = code.stackSlots()[i];
+        if (slot) {
+            out.println("let ", varName(slot), " = code.addStackSlot(", slot->byteSize(), ", ", slot->kind(), ");");
+            if (slot->offsetFromFP())
+                out.println(varName(slot), ".setOffsetFromFP(", slot->offsetFromFP(), ");");
+            out.println("hash = ", varName(slot), ".hash();");
+            out.println("if (hash != ", slot->jsHash(), ")");
+            out.println("    throw new Error(\"Bad hash: \" + hash);");
+        } else
+            out.println("code.addStackSlot(1, Spill);");
+    }
+    
+    Arg::forEachType(
+        [&] (Arg::Type type) {
+            for (unsigned i = code.numTmps(type); i--;) {
+                out.println(
+                    "let ", varNameForTmpWithTypeAndIndex(type, i), " = code.newTmp(", type, ");");
+            }
+        });
+    
+    out.println("let inst;");
+    out.println("let arg;");
+    
+    for (BasicBlock* block : code) {
+        for (FrequentedBlock successor : block->successors()) {
+            out.println(
+                varName(block), ".successors.push(new FrequentedBlock(",
+                varName(successor.block()), ", ", successor.frequency(), "));");
+        }
+        
+        for (BasicBlock* predecessor : block->predecessors())
+            out.println(varName(block), ".predecessors.push(", varName(predecessor), ");");
+        
+        for (Inst& inst : *block) {
+            // FIXME: This should do something for flags.
+            // https://bugs.webkit.org/show_bug.cgi?id=162751
+            out.println("inst = new Inst(", inst.kind.opcode, ");");
+            
+            inst.forEachArg(
+                [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                    switch (arg.kind()) {
+                    case Arg::Invalid:
+                        RELEASE_ASSERT_NOT_REACHED();
+                        break;
+                        
+                    case Arg::Tmp:
+                        out.println("arg = Arg.createTmp(", varName(arg.tmp()), ");");
+                        break;
+                        
+                    case Arg::Imm:
+                        out.println("arg = Arg.createImm(", arg.value(), ");");
+                        break;
+                        
+                    case Arg::BigImm:
+                        out.println(
+                            "arg = Arg.createBigImm(",
+                            static_cast<int32_t>(arg.value()), ", ",
+                            static_cast<int32_t>(arg.value() >> 32), ");");
+                        break;
+                        
+                    case Arg::BitImm:
+                        out.println("arg = Arg.createBitImm(", arg.value(), ");");
+                        break;
+                        
+                    case Arg::BitImm64:
+                        out.println(
+                            "arg = Arg.createBitImm64(",
+                            static_cast<int32_t>(arg.value()), ", ",
+                            static_cast<int32_t>(arg.value() >> 32), ");");
+                        break;
+                        
+                    case Arg::Addr:
+                        out.println(
+                            "arg = Arg.createAddr(", varName(arg.base()), ", ", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::Stack:
+                        out.println(
+                            "arg = Arg.createStack(", varName(arg.stackSlot()), ", ", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::CallArg:
+                        out.println("arg = Arg.createCallArg(", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::Index:
+                        out.println(
+                            "arg = Arg.createIndex(", varName(arg.base()), ", ",
+                            varName(arg.index()), ", ", arg.scale(), ", ", arg.offset(), ");");
+                        break;
+                        
+                    case Arg::RelCond:
+                        out.println("arg = Arg.createRelCond(", arg.asRelationalCondition(), ");");
+                        break;
+                        
+                    case Arg::ResCond:
+                        out.println("arg = Arg.createResCond(", arg.asResultCondition(), ");");
+                        break;
+                        
+                    case Arg::DoubleCond:
+                        out.println("arg = Arg.createDoubleCond(", arg.asDoubleCondition(), ");");
+                        break;
+                        
+                    case Arg::Special:
+                        out.println("arg = Arg.createSpecial();");
+                        break;
+                        
+                    case Arg::WidthArg:
+                        out.println("arg = Arg.createWidthArg(", arg.width(), ");");
+                        break;
+                    }
+                    
+                    out.println("inst.args.push(arg);");
+                });
+            
+            if (inst.kind.opcode == Patch) {
+                if (inst.hasNonArgEffects())
+                    out.println("inst.patchHasNonArgEffects = true;");
+                
+                out.println("inst.extraEarlyClobberedRegs = new Set();");
+                out.println("inst.extraClobberedRegs = new Set();");
+                inst.extraEarlyClobberedRegs().forEach(
+                    [&] (Reg reg) {
+                        out.println("inst.extraEarlyClobberedRegs.add(", varName(reg), ");");
+                    });
+                inst.extraClobberedRegs().forEach(
+                    [&] (Reg reg) {
+                        out.println("inst.extraClobberedRegs.add(", varName(reg), ");");
+                    });
+                
+                out.println("inst.patchArgData = [];");
+                inst.forEachArg(
+                    [&] (Arg&, Arg::Role role, Arg::Type type, Arg::Width width) {
+                        out.println(
+                            "inst.patchArgData.push({role: Arg.", role, ", type: ", type,
+                            ", width: ", width, "});");
+                    });
+            }
+            
+            if (inst.kind.opcode == CCall || inst.kind.opcode == ColdCCall) {
+                out.println("inst.cCallType = ", inst.origin->type());
+                out.println("inst.cCallArgTypes = [];");
+                for (unsigned i = 1; i < inst.origin->numChildren(); ++i)
+                    out.println("inst.cCallArgTypes.push(", inst.origin->child(i)->type(), ");");
+            }
+            
+            out.println("hash = inst.hash();");
+            out.println("if (hash != ", inst.jsHash(), ")");
+            out.println("    throw new Error(\"Bad hash: \" + hash);");
+            
+            out.println(varName(block), ".append(inst);");
+        }
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirDumpAsJS.h b/b3/air/AirDumpAsJS.h
new file mode 100644
index 0000000..8895f58
--- /dev/null
+++ b/b3/air/AirDumpAsJS.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is used for benchmarking. Various operations on Air are interesting from a benchmarking
+// standpoint. We can write some Air phases in JS and then use that to benchmark JS. The benchmark
+// is called JSAir, and it's in PerformanceTests/JSAir.
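+//
+// A minimal usage sketch (an assumption for illustration: an Air Code& named "code" is in hand):
+//     StringPrintStream out;
+//     dumpAsJS(code, out);
+//     dataLog(out.toString());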
+void dumpAsJS(Code&, PrintStream&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirEliminateDeadCode.cpp b/b3/air/AirEliminateDeadCode.cpp
new file mode 100644
index 0000000..ca36af9
--- /dev/null
+++ b/b3/air/AirEliminateDeadCode.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirEliminateDeadCode.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexSet.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool eliminateDeadCode(Code& code)
+{
+    PhaseScope phaseScope(code, "eliminateDeadCode");
+
+    HashSet<Tmp> liveTmps;
+    IndexSet<StackSlot*> liveStackSlots;
+    bool changed;
+
+    auto isArgLive = [&] (const Arg& arg) -> bool {
+        switch (arg.kind()) {
+        case Arg::Tmp:
+            if (arg.isReg())
+                return true;
+            return liveTmps.contains(arg.tmp());
+        case Arg::Stack:
+            if (arg.stackSlot()->isLocked())
+                return true;
+            return liveStackSlots.contains(arg.stackSlot());
+        default:
+            return true;
+        }
+    };
+
+    auto addLiveArg = [&] (const Arg& arg) -> bool {
+        switch (arg.kind()) {
+        case Arg::Tmp:
+            if (arg.isReg())
+                return false;
+            return liveTmps.add(arg.tmp()).isNewEntry;
+        case Arg::Stack:
+            if (arg.stackSlot()->isLocked())
+                return false;
+            return liveStackSlots.add(arg.stackSlot());
+        default:
+            return false;
+        }
+    };
+
+    auto isInstLive = [&] (Inst& inst) -> bool {
+        if (inst.hasNonArgEffects())
+            return true;
+
+        // This instruction should be presumed dead if its Args are all dead.
+        bool storesToLive = false;
+        inst.forEachArg(
+            [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                if (!Arg::isAnyDef(role))
+                    return;
+                if (role == Arg::Scratch)
+                    return;
+                storesToLive |= isArgLive(arg);
+            });
+        return storesToLive;
+    };
+
+    auto handleInst = [&] (Inst& inst) {
+        if (!isInstLive(inst))
+            return;
+
+        // We get here if the Inst is live. For simplicity we say that a live instruction forces
+        // liveness upon everything it mentions.
+        for (Arg& arg : inst.args) {
+            changed |= addLiveArg(arg);
+            arg.forEachTmpFast(
+                [&] (Tmp& tmp) {
+                    changed |= addLiveArg(tmp);
+                });
+        }
+    };
+
+    auto runForward = [&] () -> bool {
+        changed = false;
+        for (BasicBlock* block : code) {
+            for (Inst& inst : *block)
+                handleInst(inst);
+        }
+        return changed;
+    };
+
+    auto runBackward = [&] () -> bool {
+        changed = false;
+        for (unsigned blockIndex = code.size(); blockIndex--;) {
+            BasicBlock* block = code[blockIndex];
+            for (unsigned instIndex = block->size(); instIndex--;)
+                handleInst(block->at(instIndex));
+        }
+        return changed;
+    };
+
+    for (;;) {
+        // Propagating backward is most likely to be profitable.
+        if (!runBackward())
+            break;
+        if (!runBackward())
+            break;
+
+        // Occasionally propagating forward greatly reduces the likelihood of pathologies.
+        if (!runForward())
+            break;
+    }
+
+    unsigned removedInstCount = 0;
+    for (BasicBlock* block : code) {
+        removedInstCount += block->insts().removeAllMatching(
+            [&] (Inst& inst) -> bool {
+                return !isInstLive(inst);
+            });
+    }
+
+    return !!removedInstCount;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirEliminateDeadCode.h b/b3/air/AirEliminateDeadCode.h
new file mode 100644
index 0000000..1b718f6
--- /dev/null
+++ b/b3/air/AirEliminateDeadCode.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This eliminates instructions that have no observable effect. These are instructions whose only
+// effect would be storing to some Arg, except that we proved that the location specified by the Arg
+// is never loaded from. The only Args for which we can do such analysis are non-Reg Tmps and
+// anonymous StackSlots.
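+//
+// For example (a sketch with made-up names): if %tmp1 is a non-Reg Tmp that is never loaded from,
+// then "Move $42, %tmp1" has no observable effect and gets removed, while a store to a real
+// register or to a locked stack slot is conservatively treated as live and kept.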
+
+bool eliminateDeadCode(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirEmitShuffle.cpp b/b3/air/AirEmitShuffle.cpp
new file mode 100644
index 0000000..3184719
--- /dev/null
+++ b/b3/air/AirEmitShuffle.cpp
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirEmitShuffle.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+template<typename Functor>
+Tmp findPossibleScratch(Code& code, Arg::Type type, const Functor& functor)
+{
+    for (Reg reg : code.regsInPriorityOrder(type)) {
+        Tmp tmp(reg);
+        if (functor(tmp))
+            return tmp;
+    }
+    return Tmp();
+}
+
+Tmp findPossibleScratch(Code& code, Arg::Type type, const Arg& arg1, const Arg& arg2)
+{
+    return findPossibleScratch(
+        code, type,
+        [&] (Tmp tmp) -> bool {
+            return !arg1.usesTmp(tmp) && !arg2.usesTmp(tmp);
+        });
+}
+
+// Example: (a => b, b => a, a => c, b => d)
+struct Rotate {
+    Vector<ShufflePair> loop; // in the example, this is the loop: (a => b, b => a)
+    Vector<ShufflePair> fringe; // in the example, these are the associated shifts: (a => c, b => d)
+};
+
+} // anonymous namespace
+
+void ShufflePair::dump(PrintStream& out) const
+{
+    out.print(width(), ":", src(), "=>", dst());
+}
+
+Inst createShuffle(Value* origin, const Vector<ShufflePair>& pairs)
+{
+    Inst result(Shuffle, origin);
+    for (const ShufflePair& pair : pairs)
+        result.append(pair.src(), pair.dst(), Arg::widthArg(pair.width()));
+    return result;
+}
+
+Vector<Inst> emitShuffle(
+    Code& code, Vector<ShufflePair> pairs, std::array<Arg, 2> scratches, Arg::Type type,
+    Value* origin)
+{
+    if (verbose) {
+        dataLog(
+            "Dealing with pairs: ", listDump(pairs), " and scratches ", scratches[0], ", ",
+            scratches[1], "\n");
+    }
+    
+    pairs.removeAllMatching(
+        [&] (const ShufflePair& pair) -> bool {
+            return pair.src() == pair.dst();
+        });
+    
+    // First validate that this is the kind of shuffle that we know how to deal with.
+#if !ASSERT_DISABLED
+    for (const ShufflePair& pair : pairs) {
+        ASSERT(pair.src().isType(type));
+        ASSERT(pair.dst().isType(type));
+        ASSERT(pair.dst().isTmp() || pair.dst().isMemory());
+    }
+#endif // !ASSERT_DISABLED
+
+    // There are two possible kinds of operations that we will do:
+    //
+    // - Shift. Example: (a => b, b => c). We emit this as "Move b, c; Move a, b". This only requires
+    //   scratch registers if there are memory->memory moves. We want to find as many of these as
+    //   possible because they are cheaper. Note that shifts can involve the same source mentioned
+    //   multiple times. Example: (a => b, a => c, b => d, b => e).
+    //
+    // - Rotate. Example: (a => b, b => a). We want to emit this as "Swap a, b", but that instruction
+    //   may not be available, in which case we may need a scratch register or a scratch memory
+    //   location. A gnarlier example is (a => b, b => c, c => a). We can emit this as "Swap b, c;
+    //   Swap a, b". Note that swapping has to be careful about differing widths.
+    //
+    // Note that a rotate can have "fringe". For example, we might have (a => b, b => a, a => c,
+    // b => d). This has a rotate loop (a => b, b => a) and some fringe (a => c, b => d). We treat
+    // the whole thing as a single rotate.
+    //
+    // We will find multiple disjoint such operations. We can execute them in any order.
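+    //
+    // As a concrete sketch of the ordering for the fringe example above (names are illustrative,
+    // not from any particular test): for (a => b, b => a, a => c, b => d) we emit the fringe moves
+    // ("Move b, d" and "Move a, c") first, and only then the rotate itself, e.g. a single
+    // "Swap a, b" when a swap form is available for the type and width.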
+
+    // We interpret these as Moves that should be executed backwards. All shifts are keyed by their
+    // starting source.
+    HashMap<Arg, Vector<ShufflePair>> shifts;
+
+    // We interpret these as Swaps over src()'s that should be executed backwards, i.e. for a list
+    // of size 3 we would do "Swap list[1].src(), list[2].src(); Swap list[0].src(), list[1].src()".
+    // Note that we actually can't do that if the widths don't match or other bad things happen.
+    // But, prior to executing all of that, we need to execute the fringe: the shifts coming off the
+    // rotate.
+    Vector<Rotate> rotates;
+
+    {
+        HashMap<Arg, Vector<ShufflePair>> mapping;
+        for (const ShufflePair& pair : pairs)
+            mapping.add(pair.src(), Vector<ShufflePair>()).iterator->value.append(pair);
+
+        Vector<ShufflePair> currentPairs;
+
+        while (!mapping.isEmpty()) {
+            ASSERT(currentPairs.isEmpty());
+            Arg originalSrc = mapping.begin()->key;
+            ASSERT(!shifts.contains(originalSrc));
+            if (verbose)
+                dataLog("Processing from ", originalSrc, "\n");
+            
+            GraphNodeWorklist<Arg> worklist;
+            worklist.push(originalSrc);
+            while (Arg src = worklist.pop()) {
+                HashMap<Arg, Vector<ShufflePair>>::iterator iter = mapping.find(src);
+                if (iter == mapping.end()) {
+                    // With a shift it's possible that we previously built the tail of this shift.
+                    // See if that's the case now.
+                    if (verbose)
+                        dataLog("Trying to append shift at ", src, "\n");
+                    currentPairs.appendVector(shifts.take(src));
+                    continue;
+                }
+                Vector<ShufflePair> pairs = WTFMove(iter->value);
+                mapping.remove(iter);
+
+                for (const ShufflePair& pair : pairs) {
+                    currentPairs.append(pair);
+                    ASSERT(pair.src() == src);
+                    worklist.push(pair.dst());
+                }
+            }
+
+            ASSERT(currentPairs.size());
+            ASSERT(currentPairs[0].src() == originalSrc);
+
+            if (verbose)
+                dataLog("currentPairs = ", listDump(currentPairs), "\n");
+
+            bool isRotate = false;
+            for (const ShufflePair& pair : currentPairs) {
+                if (pair.dst() == originalSrc) {
+                    isRotate = true;
+                    break;
+                }
+            }
+
+            if (isRotate) {
+                if (verbose)
+                    dataLog("It's a rotate.\n");
+                Rotate rotate;
+
+                // The common case is that the rotate does not have fringe. The only way to
+                // check for this is to examine the whole rotate.
+                bool ok;
+                if (currentPairs.last().dst() == originalSrc) {
+                    ok = true;
+                    for (unsigned i = currentPairs.size() - 1; i--;)
+                        ok &= currentPairs[i].dst() == currentPairs[i + 1].src();
+                } else
+                    ok = false;
+                
+                if (ok)
+                    rotate.loop = WTFMove(currentPairs);
+                else {
+                    // This is the slow path. The rotate has fringe.
+                    
+                    HashMap<Arg, ShufflePair> dstMapping;
+                    for (const ShufflePair& pair : currentPairs)
+                        dstMapping.add(pair.dst(), pair);
+
+                    ShufflePair pair = dstMapping.take(originalSrc);
+                    for (;;) {
+                        rotate.loop.append(pair);
+
+                        auto iter = dstMapping.find(pair.src());
+                        if (iter == dstMapping.end())
+                            break;
+                        pair = iter->value;
+                        dstMapping.remove(iter);
+                    }
+
+                    rotate.loop.reverse();
+
+                    // Make sure that the fringe appears in the same order as how it appeared in the
+                    // currentPairs, since that's the DFS order.
+                    for (const ShufflePair& pair : currentPairs) {
+                        // But of course we only include it if it's not in the loop.
+                        if (dstMapping.contains(pair.dst()))
+                            rotate.fringe.append(pair);
+                    }
+                }
+                
+                // If the graph search terminates because we returned to the first source, then the
+                // pair list has to have a very particular shape.
+                for (unsigned i = rotate.loop.size() - 1; i--;)
+                    ASSERT(rotate.loop[i].dst() == rotate.loop[i + 1].src());
+                rotates.append(WTFMove(rotate));
+                currentPairs.resize(0);
+            } else {
+                if (verbose)
+                    dataLog("It's a shift.\n");
+                shifts.add(originalSrc, WTFMove(currentPairs));
+            }
+        }
+    }
+
+    if (verbose) {
+        dataLog("Shifts:\n");
+        for (auto& entry : shifts)
+            dataLog("    ", entry.key, ": ", listDump(entry.value), "\n");
+        dataLog("Rotates:\n");
+        for (auto& rotate : rotates)
+            dataLog("    loop = ", listDump(rotate.loop), ", fringe = ", listDump(rotate.fringe), "\n");
+    }
+
+    // In the worst case, we need two scratch registers. The way we do this is that the client passes
+    // us whatever scratch registers it happens to have lying around. We will need scratch registers in
+    // the following cases:
+    //
+    // - Shuffle pairs where both src and dst refer to memory.
+    // - Rotate when no Swap instruction is available.
+    //
+    // Lucky for us, we are guaranteed to have extra scratch registers anytime we have a Shift that
+    // ends with a register. We search for such a register right now.
+
+    auto moveForWidth = [&] (Arg::Width width) -> Opcode {
+        switch (width) {
+        case Arg::Width32:
+            return type == Arg::GP ? Move32 : MoveFloat;
+        case Arg::Width64:
+            return type == Arg::GP ? Move : MoveDouble;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    };
+
+    Opcode conservativeMove = moveForWidth(Arg::conservativeWidth(type));
+
+    // We will emit things in reverse. We maintain a list of packs of instructions, and then we
+    // append them together in reverse (for example the thing at the end of resultPacks is placed
+    // first). This is useful because the last thing we emit frees up its destination registers, so
+    // it affects how we emit things before it.
+    Vector<Vector<Inst>> resultPacks;
+    Vector<Inst> result;
+
+    auto commitResult = [&] () {
+        resultPacks.append(WTFMove(result));
+    };
+
+    auto getScratch = [&] (unsigned index, Tmp possibleScratch) -> Tmp {
+        if (scratches[index].isTmp())
+            return scratches[index].tmp();
+
+        if (!possibleScratch)
+            return Tmp();
+        result.append(Inst(conservativeMove, origin, possibleScratch, scratches[index]));
+        return possibleScratch;
+    };
+
+    auto returnScratch = [&] (unsigned index, Tmp tmp) {
+        if (Arg(tmp) != scratches[index])
+            result.append(Inst(conservativeMove, origin, scratches[index], tmp));
+    };
+
+    auto handleShiftPair = [&] (const ShufflePair& pair, unsigned scratchIndex) {
+        Opcode move = moveForWidth(pair.width());
+        
+        if (!isValidForm(move, pair.src().kind(), pair.dst().kind())) {
+            Tmp scratch =
+                getScratch(scratchIndex, findPossibleScratch(code, type, pair.src(), pair.dst()));
+            RELEASE_ASSERT(scratch);
+            if (isValidForm(move, pair.src().kind(), Arg::Tmp))
+                result.append(Inst(moveForWidth(pair.width()), origin, pair.src(), scratch));
+            else {
+                ASSERT(pair.src().isSomeImm());
+                ASSERT(move == Move32);
+                result.append(Inst(Move, origin, Arg::bigImm(pair.src().value()), scratch));
+            }
+            result.append(Inst(moveForWidth(pair.width()), origin, scratch, pair.dst()));
+            returnScratch(scratchIndex, scratch);
+            return;
+        }
+        
+        result.append(Inst(move, origin, pair.src(), pair.dst()));
+    };
+
+    auto handleShift = [&] (Vector<ShufflePair>& shift) {
+        // FIXME: We could optimize the spill behavior of the shifter by checking if any of the
+        // shifts need spills. If they do, then we could try to get a register out here. Note that
+        // this may fail where the current strategy succeeds: out here we need a register that does
+        // not interfere with any of the shifts, while the current strategy only needs to find a
+        // scratch register that does not interfer with a particular shift. So, this optimization
+        // will be opportunistic: if it succeeds, then the individual shifts can use that scratch,
+        // otherwise they will do what they do now.
+        
+        for (unsigned i = shift.size(); i--;)
+            handleShiftPair(shift[i], 0);
+
+        Arg lastDst = shift.last().dst();
+        if (lastDst.isTmp()) {
+            for (Arg& scratch : scratches) {
+                ASSERT(scratch != lastDst);
+                if (!scratch.isTmp()) {
+                    scratch = lastDst;
+                    break;
+                }
+            }
+        }
+    };
+
+    // First handle shifts whose last destination is a tmp because these free up scratch registers.
+    // These end up last in the final sequence, so the final destination of these shifts will be
+    // available as a scratch location for anything emitted prior (so, after, since we're emitting in
+    // reverse).
+    for (auto& entry : shifts) {
+        Vector<ShufflePair>& shift = entry.value;
+        if (shift.last().dst().isTmp())
+            handleShift(shift);
+        commitResult();
+    }
+
+    // Now handle the rest of the shifts.
+    for (auto& entry : shifts) {
+        Vector<ShufflePair>& shift = entry.value;
+        if (!shift.last().dst().isTmp())
+            handleShift(shift);
+        commitResult();
+    }
+
+    for (Rotate& rotate : rotates) {
+        if (!rotate.fringe.isEmpty()) {
+            // Make sure we do the fringe first! This won't clobber any of the registers that are
+            // part of the rotation.
+            handleShift(rotate.fringe);
+        }
+        
+        bool canSwap = false;
+        Opcode swap = Oops;
+        Arg::Width swapWidth = Arg::Width8; // bogus value
+
+        // Currently, the swap instruction is not available for floating point on any architecture we
+        // support.
+        if (type == Arg::GP) {
+            // Figure out whether we will be doing 64-bit swaps or 32-bit swaps. If we have a mix of
+            // widths we handle that by fixing up the relevant register with zero-extends.
+            swap = Swap32;
+            swapWidth = Arg::Width32;
+            bool hasMemory = false;
+            bool hasIndex = false;
+            for (ShufflePair& pair : rotate.loop) {
+                switch (pair.width()) {
+                case Arg::Width32:
+                    break;
+                case Arg::Width64:
+                    swap = Swap64;
+                    swapWidth = Arg::Width64;
+                    break;
+                default:
+                    RELEASE_ASSERT_NOT_REACHED();
+                    break;
+                }
+
+                hasMemory |= pair.src().isMemory() || pair.dst().isMemory();
+                hasIndex |= pair.src().isIndex() || pair.dst().isIndex();
+            }
+            
+            canSwap = isValidForm(swap, Arg::Tmp, Arg::Tmp);
+
+            // We can totally use swaps even if there are shuffles involving memory. But, we play it
+            // safe in that case. There are corner cases we don't handle, and our ability to do it is
+            // contingent upon swap form availability.
+            
+            if (hasMemory) {
+                canSwap &= isValidForm(swap, Arg::Tmp, Arg::Addr);
+                
+                // We don't take the swapping path if there is a mix of widths and some of the
+                // shuffles involve memory. That gets too confusing. We might be able to relax this
+                // to only bail if there are subwidth pairs involving memory, but I haven't thought
+                // about it very hard. Anyway, this case is not common: rotates involving memory
+                // don't arise for function calls, and they will only happen for rotates in user code
+                // if some of the variables get spilled. It's hard to imagine a program that rotates
+                // data around in variables while also doing a combination of uint32->uint64 and
+                // int64->int32 casts.
+                for (ShufflePair& pair : rotate.loop)
+                    canSwap &= pair.width() == swapWidth;
+            }
+
+            if (hasIndex)
+                canSwap &= isValidForm(swap, Arg::Tmp, Arg::Index);
+        }
+
+        if (canSwap) {
+            for (unsigned i = rotate.loop.size() - 1; i--;) {
+                Arg left = rotate.loop[i].src();
+                Arg right = rotate.loop[i + 1].src();
+
+                if (left.isMemory() && right.isMemory()) {
+                    // Note that this is a super rare outcome. Rotates are rare. Spills are rare.
+                    // Moving data between two spills is rare. To get here a lot of rare stuff has to
+                    // all happen at once.
+                    
+                    Tmp scratch = getScratch(0, findPossibleScratch(code, type, left, right));
+                    RELEASE_ASSERT(scratch);
+                    result.append(Inst(moveForWidth(swapWidth), origin, left, scratch));
+                    result.append(Inst(swap, origin, scratch, right));
+                    result.append(Inst(moveForWidth(swapWidth), origin, scratch, left));
+                    returnScratch(0, scratch);
+                    continue;
+                }
+
+                if (left.isMemory())
+                    std::swap(left, right);
+                
+                result.append(Inst(swap, origin, left, right));
+            }
+
+            for (ShufflePair pair : rotate.loop) {
+                if (pair.width() == swapWidth)
+                    continue;
+
+                RELEASE_ASSERT(pair.width() == Arg::Width32);
+                RELEASE_ASSERT(swapWidth == Arg::Width64);
+                RELEASE_ASSERT(pair.dst().isTmp());
+
+                // Need to do an extra zero extension.
+                result.append(Inst(Move32, origin, pair.dst(), pair.dst()));
+            }
+        } else {
+            // We can treat this as a shift so long as we take the last destination (i.e. first
+            // source) and save it first. Then we handle the first entry in the pair in the rotate
+            // specially, after we restore the last destination. This requires some special care to
+            // find a scratch register. It's possible that we have a rotate that uses the entire
+            // available register file.
+
+            Tmp scratch = findPossibleScratch(
+                code, type,
+                [&] (Tmp tmp) -> bool {
+                    for (ShufflePair pair : rotate.loop) {
+                        if (pair.src().usesTmp(tmp))
+                            return false;
+                        if (pair.dst().usesTmp(tmp))
+                            return false;
+                    }
+                    return true;
+                });
+
+            // NOTE: This is the most likely use of scratch registers.
+            scratch = getScratch(0, scratch);
+
+            // We may not have found a scratch register. When this happens, we can just use the spill
+            // slot directly.
+            Arg rotateSave = scratch ? Arg(scratch) : scratches[0];
+            
+            handleShiftPair(
+                ShufflePair(rotate.loop.last().dst(), rotateSave, rotate.loop[0].width()), 1);
+
+            for (unsigned i = rotate.loop.size(); i-- > 1;)
+                handleShiftPair(rotate.loop[i], 1);
+
+            handleShiftPair(
+                ShufflePair(rotateSave, rotate.loop[0].dst(), rotate.loop[0].width()), 1);
+
+            if (scratch)
+                returnScratch(0, scratch);
+        }
+
+        commitResult();
+    }
+
+    ASSERT(result.isEmpty());
+
+    for (unsigned i = resultPacks.size(); i--;)
+        result.appendVector(resultPacks[i]);
+
+    return result;
+}
+
+Vector<Inst> emitShuffle(
+    Code& code, const Vector<ShufflePair>& pairs,
+    const std::array<Arg, 2>& gpScratch, const std::array<Arg, 2>& fpScratch,
+    Value* origin)
+{
+    Vector<ShufflePair> gpPairs;
+    Vector<ShufflePair> fpPairs;
+    for (const ShufflePair& pair : pairs) {
+        if (pair.src().isMemory() && pair.dst().isMemory() && pair.width() > Arg::pointerWidth()) {
+            // 8-byte memory-to-memory moves on a 32-bit platform are best handled as float moves.
+            fpPairs.append(pair);
+        } else if (pair.src().isGP() && pair.dst().isGP()) {
+            // This means that gpPairs gets memory-to-memory shuffles. The assumption is that we
+            // can do that more efficiently using GPRs, except in the special case above.
+            gpPairs.append(pair);
+        } else
+            fpPairs.append(pair);
+    }
+
+    Vector<Inst> result;
+    result.appendVector(emitShuffle(code, gpPairs, gpScratch, Arg::GP, origin));
+    result.appendVector(emitShuffle(code, fpPairs, fpScratch, Arg::FP, origin));
+    return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirEmitShuffle.h b/b3/air/AirEmitShuffle.h
new file mode 100644
index 0000000..b2c3bb0
--- /dev/null
+++ b/b3/air/AirEmitShuffle.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirInst.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 {
+
+class Value;
+
+namespace Air {
+
+class Code;
+
+class ShufflePair {
+public:
+    ShufflePair()
+    {
+    }
+    
+    ShufflePair(const Arg& src, const Arg& dst, Arg::Width width)
+        : m_src(src)
+        , m_dst(dst)
+        , m_width(width)
+    {
+    }
+
+    const Arg& src() const { return m_src; }
+    const Arg& dst() const { return m_dst; }
+
+    // The width determines the kind of move we do. You can only choose Width32 or Width64 right now.
+    // For GP, it picks between Move32 and Move. For FP, it picks between MoveFloat and MoveDouble.
+    Arg::Width width() const { return m_width; }
+
+    void dump(PrintStream&) const;
+    
+private:
+    Arg m_src;
+    Arg m_dst;
+    Arg::Width m_width { Arg::Width8 };
+};
+
+// Create a Shuffle instruction.
+Inst createShuffle(Value* origin, const Vector<ShufflePair>&);
+
+// Perform a shuffle of a given type. The scratch argument is mandatory. You should pass it as
+// follows: If you know that you have scratch registers or temporaries available - that is, they're
+// registers that are not mentioned in the shuffle, have the same type as the shuffle, and are not
+// live at the shuffle - then you can pass them. If you don't have scratch registers available or if
+// you don't feel like looking for them, you can pass memory locations. It's always safe to pass a
+// pair of memory locations, and replacing either memory location with a register can be viewed as an
+// optimization. It's a pretty important optimization. Some more notes:
+//
+// - We define scratch registers as things that are not live before the shuffle and are not one of
+//   the destinations of the shuffle. Not being live before the shuffle also means that they cannot
+//   be used for any of the sources of the shuffle.
+//
+// - A second scratch location is only needed when you have shuffle pairs where memory is used both
+//   as source and destination.
+//
+// - You're guaranteed not to need any scratch locations if there is a Swap instruction available for
+//   the type and you don't have any memory locations that are both the source and the destination of
+//   some pairs. GP supports Swap on x86 while FP never supports Swap.
+//
+// - Passing memory locations as scratch if you are running emitShuffle() before register allocation
+//   is silly, since that will cause emitShuffle() to pick some specific registers when it does need
+//   scratch. One easy way to avoid that predicament is to ensure that you call emitShuffle() after
+//   register allocation. For this reason we provide the Shuffle instruction, so that we can defer
+//   shufflings until after regalloc.
+//
+// - Shuffles with memory=>memory pairs are not very well tuned. You should avoid them if you want
+//   performance. If you need to do them, then making sure that you reserve a temporary is one way to
+//   get acceptable performance.
+//
+// NOTE: Use this method (and its friend below) to emit shuffles after register allocation. Before
+// register allocation it is much better to simply use the Shuffle instruction.
+Vector<Inst> emitShuffle(
+    Code& code, Vector<ShufflePair>, std::array<Arg, 2> scratch, Arg::Type, Value* origin);
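+
+// A minimal usage sketch for the single-type form above (illustrative only; "code", "slot",
+// "scratchReg", "srcN"/"dstN", and "origin" are assumed to exist in the caller): shuffle two
+// 64-bit GP pairs after register allocation, with one known scratch register and a spill slot as
+// the second scratch location:
+//
+//     Vector<ShufflePair> pairs;
+//     pairs.append(ShufflePair(Arg(Tmp(src0)), Arg(Tmp(dst0)), Arg::Width64));
+//     pairs.append(ShufflePair(Arg(Tmp(src1)), Arg(Tmp(dst1)), Arg::Width64));
+//     Vector<Inst> shuffle = emitShuffle(
+//         code, pairs, { Arg(Tmp(scratchReg)), Arg::stack(slot) }, Arg::GP, origin);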
+
+// Perform a shuffle that involves any number of types. Pass scratch registers or memory locations
+// for each type according to the rules above.
+Vector<Inst> emitShuffle(
+    Code& code, const Vector<ShufflePair>&,
+    const std::array<Arg, 2>& gpScratch, const std::array<Arg, 2>& fpScratch,
+    Value* origin);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirFixObviousSpills.cpp b/b3/air/AirFixObviousSpills.cpp
new file mode 100644
index 0000000..d000d6c
--- /dev/null
+++ b/b3/air/AirFixObviousSpills.cpp
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirFixObviousSpills.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/IndexMap.h>
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+
+class FixObviousSpills {
+public:
+    FixObviousSpills(Code& code)
+        : m_code(code)
+        , m_atHead(code.size())
+    {
+    }
+
+    void run()
+    {
+        if (verbose)
+            dataLog("Code before fixObviousSpills:\n", m_code);
+        
+        computeAliases();
+        fixCode();
+    }
+
+private:
+    void computeAliases()
+    {
+        m_atHead[m_code[0]].wasVisited = true;
+        
+        bool changed = true;
+        while (changed) {
+            changed = false;
+            
+            for (BasicBlock* block : m_code) {
+                m_block = block;
+                m_state = m_atHead[block];
+                if (!m_state.wasVisited)
+                    continue;
+
+                if (verbose)
+                    dataLog("Executing block ", *m_block, ": ", m_state, "\n");
+                
+                for (m_instIndex = 0; m_instIndex < block->size(); ++m_instIndex)
+                    executeInst();
+
+                for (BasicBlock* successor : block->successorBlocks()) {
+                    State& toState = m_atHead[successor];
+                    if (toState.wasVisited)
+                        changed |= toState.merge(m_state);
+                    else {
+                        toState = m_state;
+                        changed = true;
+                    }
+                }
+            }
+        }
+    }
+
+    void fixCode()
+    {
+        for (BasicBlock* block : m_code) {
+            m_block = block;
+            m_state = m_atHead[block];
+            RELEASE_ASSERT(m_state.wasVisited);
+
+            for (m_instIndex = 0; m_instIndex < block->size(); ++m_instIndex) {
+                fixInst();
+                executeInst();
+            }
+        }
+    }
+
+    void executeInst()
+    {
+        Inst& inst = m_block->at(m_instIndex);
+
+        if (verbose)
+            dataLog("    Executing ", inst, ": ", m_state, "\n");
+
+        Inst::forEachDefWithExtraClobberedRegs<Arg>(
+            &inst, &inst,
+            [&] (const Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                if (verbose)
+                    dataLog("        Clobbering ", arg, "\n");
+                m_state.clobber(arg);
+            });
+
+        switch (inst.kind.opcode) {
+        case Move:
+            if (inst.args[0].isSomeImm()) {
+                if (inst.args[1].isReg())
+                    m_state.addAlias(RegConst(inst.args[1].reg(), inst.args[0].value()));
+                else if (isSpillSlot(inst.args[1]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), inst.args[0].value()));
+            } else if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(RegConst(inst.args[1].reg(), *constant));
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::AllBits));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), *constant));
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::AllBits));
+            }
+            break;
+
+        case Move32:
+            if (inst.args[0].isSomeImm()) {
+                if (inst.args[1].isReg())
+                    m_state.addAlias(RegConst(inst.args[1].reg(), static_cast<uint32_t>(inst.args[0].value())));
+                else if (isSpillSlot(inst.args[1]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), static_cast<uint32_t>(inst.args[0].value())));
+            } else if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(RegConst(inst.args[1].reg(), static_cast<uint32_t>(*constant)));
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::ZExt32));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                if (std::optional<int64_t> constant = m_state.constantFor(inst.args[0]))
+                    m_state.addAlias(SlotConst(inst.args[1].stackSlot(), static_cast<uint32_t>(*constant)));
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::Match32));
+            }
+            break;
+
+        case MoveFloat:
+            if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::Match32));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::Match32));
+            }
+            break;
+
+        case MoveDouble:
+            if (isSpillSlot(inst.args[0]) && inst.args[1].isReg()) {
+                m_state.addAlias(
+                    RegSlot(inst.args[1].reg(), inst.args[0].stackSlot(), RegSlot::AllBits));
+            } else if (inst.args[0].isReg() && isSpillSlot(inst.args[1])) {
+                m_state.addAlias(
+                    RegSlot(inst.args[0].reg(), inst.args[1].stackSlot(), RegSlot::AllBits));
+            }
+            break;
+
+        default:
+            break;
+        }
+    }
+
+    void fixInst()
+    {
+        Inst& inst = m_block->at(m_instIndex);
+
+        if (verbose)
+            dataLog("Fixing inst ", inst, ": ", m_state, "\n");
+        
+        // First handle some special instructions.
+        switch (inst.kind.opcode) {
+        case Move: {
+            if (inst.args[0].isBigImm() && inst.args[1].isReg()
+                && isValidForm(Add64, Arg::Imm, Arg::Tmp, Arg::Tmp)) {
+                // BigImm materializations are super expensive on both x86 and ARM. Let's try to
+                // materialize this bad boy using math instead. Note that we use unsigned math here
+                // since it's more deterministic.
+                uint64_t myValue = inst.args[0].value();
+                Reg myDest = inst.args[1].reg();
+                for (const RegConst& regConst : m_state.regConst) {
+                    uint64_t otherValue = regConst.constant;
+                    
+                    // Let's try add. That's the only thing that works on all platforms, since it's
+                    // the only cheap arithmetic op that x86 does in three operands. Long term, we
+                    // should add fancier materializations here for ARM if the BigImm is yuge.
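+                    //
+                    // For example (illustrative numbers only): if some register is already known to
+                    // hold 0x100000000 and this Move wants 0x100000010, the delta is 16, which fits
+                    // in an immediate, so we can emit "Add64 $16, %thatReg, %dest" instead of
+                    // materializing the full 64-bit constant.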
+                    uint64_t delta = myValue - otherValue;
+                    
+                    if (Arg::isValidImmForm(delta)) {
+                        inst.kind = Add64;
+                        inst.args.resize(3);
+                        inst.args[0] = Arg::imm(delta);
+                        inst.args[1] = Tmp(regConst.reg);
+                        inst.args[2] = Tmp(myDest);
+                        return;
+                    }
+                }
+                return;
+            }
+            break;
+        }
+            
+        default:
+            break;
+        }
+
+        // Create a copy in case we invalidate the instruction. That doesn't happen often.
+        Inst instCopy = inst;
+
+        // The goal is to replace references to stack slots. We only care about early uses. We can't
+        // handle UseDefs. We could teach this to handle UseDefs if we inserted a store instruction
+        // after and we proved that the register aliased to the stack slot dies here. We can get that
+        // information from the liveness analysis. We also can't handle late uses, because we don't
+        // look at late clobbers when doing this.
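+        //
+        // Illustrative sketch (register and slot names are made up): if %rax is known to hold the
+        // same 64-bit value as (spill3), then an early use such as "Add64 (spill3), %rcx" can be
+        // rewritten as "Add64 %rax, %rcx", saving a load.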
+        bool didThings = false;
+        auto handleArg = [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width width) {
+            if (!isSpillSlot(arg))
+                return;
+            if (!Arg::isEarlyUse(role))
+                return;
+            if (Arg::isAnyDef(role))
+                return;
+            
+            // Try to get a register if at all possible.
+            if (const RegSlot* alias = m_state.getRegSlot(arg.stackSlot())) {
+                switch (width) {
+                case Arg::Width64:
+                    if (alias->mode != RegSlot::AllBits)
+                        return;
+                    if (verbose)
+                        dataLog("    Replacing ", arg, " with ", alias->reg, "\n");
+                    arg = Tmp(alias->reg);
+                    didThings = true;
+                    return;
+                case Arg::Width32:
+                    if (verbose)
+                        dataLog("    Replacing ", arg, " with ", alias->reg, " (subwidth case)\n");
+                    arg = Tmp(alias->reg);
+                    didThings = true;
+                    return;
+                default:
+                    return;
+                }
+            }
+
+            // Revert to immediate if that didn't work.
+            if (const SlotConst* alias = m_state.getSlotConst(arg.stackSlot())) {
+                if (verbose)
+                    dataLog("    Replacing ", arg, " with constant ", alias->constant, "\n");
+                if (Arg::isValidImmForm(alias->constant))
+                    arg = Arg::imm(alias->constant);
+                else
+                    arg = Arg::bigImm(alias->constant);
+                didThings = true;
+                return;
+            }
+        };
+        
+        inst.forEachArg(handleArg);
+        if (!didThings || inst.isValidForm())
+            return;
+        
+        // We introduced something invalid along the way. Back up and carefully handle each argument.
+        inst = instCopy;
+        ASSERT(inst.isValidForm());
+        inst.forEachArg(
+            [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+                Arg argCopy = arg;
+                handleArg(arg, role, type, width);
+                if (!inst.isValidForm())
+                    arg = argCopy;
+            });
+    }
+    
+    static bool isSpillSlot(const Arg& arg)
+    {
+        return arg.isStack() && arg.stackSlot()->isSpill();
+    }
+    
+    struct RegConst {
+        RegConst()
+        {
+        }
+        
+        RegConst(Reg reg, int64_t constant)
+            : reg(reg)
+            , constant(constant)
+        {
+        }
+
+        explicit operator bool() const
+        {
+            return !!reg;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(reg, "->", constant);
+        }
+        
+        Reg reg;
+        int64_t constant { 0 };
+    };
+
+    struct RegSlot {
+        enum Mode : int8_t {
+            AllBits,
+            ZExt32, // Register contains zero-extended contents of stack slot.
+            Match32 // Low 32 bits of register match low 32 bits of stack slot.
+        };
+        
+        RegSlot()
+        {
+        }
+
+        RegSlot(Reg reg, StackSlot* slot, Mode mode)
+            : slot(slot)
+            , reg(reg)
+            , mode(mode)
+        {
+        }
+
+        explicit operator bool() const
+        {
+            return slot && reg;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(pointerDump(slot), "->", reg);
+            switch (mode) {
+            case AllBits:
+                out.print("(AllBits)");
+                break;
+            case ZExt32:
+                out.print("(ZExt32)");
+                break;
+            case Match32:
+                out.print("(Match32)");
+                break;
+            }
+        }
+        
+        StackSlot* slot { nullptr };
+        Reg reg;
+        Mode mode { AllBits };
+    };
+
+    struct SlotConst {
+        SlotConst()
+        {
+        }
+
+        SlotConst(StackSlot* slot, int64_t constant)
+            : slot(slot)
+            , constant(constant)
+        {
+        }
+
+        explicit operator bool() const
+        {
+            return slot;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(pointerDump(slot), "->", constant);
+        }
+        
+        StackSlot* slot { nullptr };
+        int64_t constant { 0 };
+    };
+
+    struct State {
+        void addAlias(const RegConst& newAlias)
+        {
+            regConst.append(newAlias);
+        }
+        void addAlias(const RegSlot& newAlias)
+        {
+            regSlot.append(newAlias);
+        }
+        void addAlias(const SlotConst& newAlias)
+        {
+            slotConst.append(newAlias);
+        }
+
+        const RegConst* getRegConst(Reg reg) const
+        {
+            for (const RegConst& alias : regConst) {
+                if (alias.reg == reg)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const RegSlot* getRegSlot(Reg reg) const
+        {
+            for (const RegSlot& alias : regSlot) {
+                if (alias.reg == reg)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const RegSlot* getRegSlot(StackSlot* slot) const
+        {
+            for (const RegSlot& alias : regSlot) {
+                if (alias.slot == slot)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const RegSlot* getRegSlot(Reg reg, StackSlot* slot) const
+        {
+            for (const RegSlot& alias : regSlot) {
+                if (alias.reg == reg && alias.slot == slot)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        const SlotConst* getSlotConst(StackSlot* slot) const
+        {
+            for (const SlotConst& alias : slotConst) {
+                if (alias.slot == slot)
+                    return &alias;
+            }
+            return nullptr;
+        }
+
+        std::optional<int64_t> constantFor(const Arg& arg)
+        {
+            if (arg.isReg()) {
+                if (const RegConst* alias = getRegConst(arg.reg()))
+                    return alias->constant;
+                return std::nullopt;
+            }
+            if (arg.isStack()) {
+                if (const SlotConst* alias = getSlotConst(arg.stackSlot()))
+                    return alias->constant;
+                return std::nullopt;
+            }
+            return std::nullopt;
+        }
+
+        void clobber(const Arg& arg)
+        {
+            if (arg.isReg()) {
+                regConst.removeAllMatching(
+                    [&] (const RegConst& alias) -> bool {
+                        return alias.reg == arg.reg();
+                    });
+                regSlot.removeAllMatching(
+                    [&] (const RegSlot& alias) -> bool {
+                        return alias.reg == arg.reg();
+                    });
+                return;
+            }
+            if (arg.isStack()) {
+                slotConst.removeAllMatching(
+                    [&] (const SlotConst& alias) -> bool {
+                        return alias.slot == arg.stackSlot();
+                    });
+                regSlot.removeAllMatching(
+                    [&] (const RegSlot& alias) -> bool {
+                        return alias.slot == arg.stackSlot();
+                    });
+            }
+        }
+
+        bool merge(const State& other)
+        {
+            bool changed = false;
+            
+            changed |= !!regConst.removeAllMatching(
+                [&] (RegConst& alias) -> bool {
+                    const RegConst* otherAlias = other.getRegConst(alias.reg);
+                    if (!otherAlias)
+                        return true;
+                    if (alias.constant != otherAlias->constant)
+                        return true;
+                    return false;
+                });
+
+            changed |= !!slotConst.removeAllMatching(
+                [&] (SlotConst& alias) -> bool {
+                    const SlotConst* otherAlias = other.getSlotConst(alias.slot);
+                    if (!otherAlias)
+                        return true;
+                    if (alias.constant != otherAlias->constant)
+                        return true;
+                    return false;
+                });
+
+            changed |= !!regSlot.removeAllMatching(
+                [&] (RegSlot& alias) -> bool {
+                    const RegSlot* otherAlias = other.getRegSlot(alias.reg, alias.slot);
+                    if (!otherAlias)
+                        return true;
+                    if (alias.mode != RegSlot::Match32 && alias.mode != otherAlias->mode) {
+                        alias.mode = RegSlot::Match32;
+                        changed = true;
+                    }
+                    return false;
+                });
+
+            return changed;
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(
+                "{regConst = [", listDump(regConst), "], slotConst = [", listDump(slotConst),
+                "], regSlot = [", listDump(regSlot), "], wasVisited = ", wasVisited, "}");
+        }
+
+        Vector<RegConst> regConst;
+        Vector<SlotConst> slotConst;
+        Vector<RegSlot> regSlot;
+        bool wasVisited { false };
+    };
+
+    Code& m_code;
+    IndexMap<BasicBlock, State> m_atHead;
+    State m_state;
+    BasicBlock* m_block { nullptr };
+    unsigned m_instIndex { 0 };
+};
+
+} // anonymous namespace
+
+void fixObviousSpills(Code& code)
+{
+    PhaseScope phaseScope(code, "fixObviousSpills");
+
+    FixObviousSpills fixObviousSpills(code);
+    fixObviousSpills.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirFixObviousSpills.h b/b3/air/AirFixObviousSpills.h
new file mode 100644
index 0000000..fb8e41f
--- /dev/null
+++ b/b3/air/AirFixObviousSpills.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a forward flow phase that tracks equivalence between spill slots and registers. It
+// removes loads from spill slots when the contents of the spill slot can be found in (or
+// computed from) a register.
+void fixObviousSpills(Code&);
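+//
+// For example (illustrative Air, not taken from an actual compile): if %rax was spilled to
+// stack0 and neither has been clobbered since, then a later reload
+//
+//     Move (stack0), %rcx
+//
+// can have its stack argument replaced with the register, leaving
+//
+//     Move %rax, %rcx
+//
+// and avoiding the memory access entirely.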
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirFixPartialRegisterStalls.cpp b/b3/air/AirFixPartialRegisterStalls.cpp
new file mode 100644
index 0000000..b3d5d0b
--- /dev/null
+++ b/b3/air/AirFixPartialRegisterStalls.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirFixPartialRegisterStalls.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInst.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "MacroAssembler.h"
+#include <wtf/IndexMap.h>
+#include <wtf/IndexSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool hasPartialXmmRegUpdate(const Inst& inst)
+{
+    switch (inst.kind.opcode) {
+    case ConvertDoubleToFloat:
+    case ConvertFloatToDouble:
+    case ConvertInt32ToDouble:
+    case ConvertInt64ToDouble:
+    case ConvertInt32ToFloat:
+    case ConvertInt64ToFloat:
+    case SqrtDouble:
+    case SqrtFloat:
+    case CeilDouble:
+    case CeilFloat:
+    case FloorDouble:
+    case FloorFloat:
+        return true;
+    default:
+        break;
+    }
+    return false;
+}
+
+bool isDependencyBreaking(const Inst& inst)
+{
+    // "xorps reg, reg" is used by the frontend to remove the dependency on its argument.
+    return inst.kind.opcode == MoveZeroToDouble;
+}
+
+// FIXME: find a good distance per architecture experimentally.
+// LLVM uses a distance of 16 but that comes from Nehalem.
+unsigned char minimumSafeDistance = 16;
+
+struct FPDefDistance {
+    FPDefDistance()
+    {
+        for (unsigned i = 0; i < MacroAssembler::numberOfFPRegisters(); ++i)
+            distance[i] = 255;
+    }
+
+    void reset(FPRReg reg)
+    {
+        unsigned index = MacroAssembler::fpRegisterIndex(reg);
+        distance[index] = 255;
+    }
+
+    void add(FPRReg reg, unsigned registerDistance)
+    {
+        unsigned index = MacroAssembler::fpRegisterIndex(reg);
+        if (registerDistance < distance[index])
+            distance[index] = static_cast<unsigned char>(registerDistance);
+    }
+
+    bool updateFromPrecessor(FPDefDistance& precessorDistance, unsigned constantOffset = 0)
+    {
+        bool changed = false;
+        for (unsigned i = 0; i < MacroAssembler::numberOfFPRegisters(); ++i) {
+            unsigned regDistance = precessorDistance.distance[i] + constantOffset;
+            if (regDistance < minimumSafeDistance && regDistance < distance[i]) {
+                distance[i] = regDistance;
+                changed = true;
+            }
+        }
+        return changed;
+    }
+
+    unsigned char distance[MacroAssembler::numberOfFPRegisters()];
+};
+
+void updateDistances(Inst& inst, FPDefDistance& localDistance, unsigned& distanceToBlockEnd)
+{
+    --distanceToBlockEnd;
+
+    if (isDependencyBreaking(inst)) {
+        localDistance.reset(inst.args[0].tmp().fpr());
+        return;
+    }
+
+    inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+        ASSERT_WITH_MESSAGE(tmp.isReg(), "This phase must be run after register allocation.");
+
+        if (tmp.isFPR() && Arg::isAnyDef(role))
+            localDistance.add(tmp.fpr(), distanceToBlockEnd);
+    });
+}
+
+}
+
+void fixPartialRegisterStalls(Code& code)
+{
+    if (!isX86())
+        return;
+
+    PhaseScope phaseScope(code, "fixPartialRegisterStalls");
+
+    Vector<BasicBlock*> candidates;
+
+    for (BasicBlock* block : code) {
+        for (const Inst& inst : *block) {
+            if (hasPartialXmmRegUpdate(inst)) {
+                candidates.append(block);
+                break;
+            }
+        }
+    }
+
+    // Fortunately, instructions with partial register updates are rare. Return early if no block
+    // contains any.
+    if (candidates.isEmpty())
+        return;
+
+    // For each block, this provides the distance to the last instruction setting each register
+    // on block *entry*.
+    IndexMap<BasicBlock, FPDefDistance> lastDefDistance(code.size());
+
+    // Blocks with dirty distance at head.
+    IndexSet<BasicBlock> dirty;
+
+    // First, we compute the local distance for each block and push it to the successors.
+    for (BasicBlock* block : code) {
+        FPDefDistance localDistance;
+
+        unsigned distanceToBlockEnd = block->size();
+        for (Inst& inst : *block)
+            updateDistances(inst, localDistance, distanceToBlockEnd);
+
+        for (BasicBlock* successor : block->successorBlocks()) {
+            if (lastDefDistance[successor].updateFromPrecessor(localDistance))
+                dirty.add(successor);
+        }
+    }
+
+    // Now we propagate the minimums across blocks.
+    bool changed;
+    do {
+        changed = false;
+
+        for (BasicBlock* block : code) {
+            if (!dirty.remove(block))
+                continue;
+
+            // Little shortcut: if the block is big enough, propagating it won't add any information.
+            if (block->size() >= minimumSafeDistance)
+                continue;
+
+            unsigned blockSize = block->size();
+            FPDefDistance& blockDistance = lastDefDistance[block];
+            for (BasicBlock* successor : block->successorBlocks()) {
+                if (lastDefDistance[successor].updateFromPrecessor(blockDistance, blockSize)) {
+                    dirty.add(successor);
+                    changed = true;
+                }
+            }
+        }
+    } while (changed);
+
+    // Finally, update each block as needed.
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : candidates) {
+        unsigned distanceToBlockEnd = block->size();
+        FPDefDistance& localDistance = lastDefDistance[block];
+
+        for (unsigned i = 0; i < block->size(); ++i) {
+            Inst& inst = block->at(i);
+
+            if (hasPartialXmmRegUpdate(inst)) {
+                RegisterSet defs;
+                RegisterSet uses;
+                inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+                    if (tmp.isFPR()) {
+                        if (Arg::isAnyDef(role))
+                            defs.set(tmp.fpr());
+                        if (Arg::isAnyUse(role))
+                            uses.set(tmp.fpr());
+                    }
+                });
+                // We only care about values we define but not use. Otherwise we have to wait
+                // for the value to be resolved anyway.
+                defs.exclude(uses);
+
+                defs.forEach([&] (Reg reg) {
+                    if (localDistance.distance[MacroAssembler::fpRegisterIndex(reg.fpr())] < minimumSafeDistance)
+                        insertionSet.insert(i, MoveZeroToDouble, inst.origin, Tmp(reg));
+                });
+            }
+
+            updateDistances(inst, localDistance, distanceToBlockEnd);
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirFixPartialRegisterStalls.h b/b3/air/AirFixPartialRegisterStalls.h
new file mode 100644
index 0000000..0093279
--- /dev/null
+++ b/b3/air/AirFixPartialRegisterStalls.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// x86 has a pipelining hazard caused by false dependencies between instructions.
+//
+// Some instructions update only part of a register, so they can only be scheduled after the
+// previous definition of that register has been computed. The compiler can avoid this problem
+// by explicitly resetting the entire register before executing the instruction with the
+// partial update.
+//
+// See "Partial XMM Register Stalls" and "Dependency Breaking Idioms" in the manual.
+void fixPartialRegisterStalls(Code&);
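+//
+// As a sketch (register names are illustrative): on x86, "cvtsi2sd %eax, %xmm0" writes only the
+// low bits of %xmm0, so the CPU treats whatever last wrote %xmm0 as an input even though that
+// value is not actually needed. Emitting the dependency-breaking idiom first,
+//
+//     xorps %xmm0, %xmm0
+//     cvtsi2sd %eax, %xmm0
+//
+// zeroes the register without creating a dependency, so the conversion no longer has to wait on
+// the stale definition.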
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirFrequentedBlock.h b/b3/air/AirFrequentedBlock.h
new file mode 100644
index 0000000..37cd287
--- /dev/null
+++ b/b3/air/AirFrequentedBlock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3GenericFrequentedBlock.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+
+typedef GenericFrequentedBlock<BasicBlock> FrequentedBlock;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirGenerate.cpp b/b3/air/AirGenerate.cpp
new file mode 100644
index 0000000..a99f050
--- /dev/null
+++ b/b3/air/AirGenerate.cpp
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirGenerate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirAllocateStack.h"
+#include "AirCode.h"
+#include "AirDumpAsJS.h"
+#include "AirEliminateDeadCode.h"
+#include "AirFixObviousSpills.h"
+#include "AirFixPartialRegisterStalls.h"
+#include "AirGenerationContext.h"
+#include "AirHandleCalleeSaves.h"
+#include "AirIteratedRegisterCoalescing.h"
+#include "AirLogRegisterPressure.h"
+#include "AirLowerAfterRegAlloc.h"
+#include "AirLowerEntrySwitch.h"
+#include "AirLowerMacros.h"
+#include "AirOpcodeUtils.h"
+#include "AirOptimizeBlockOrder.h"
+#include "AirReportUsedRegisters.h"
+#include "AirSimplifyCFG.h"
+#include "AirSpillEverything.h"
+#include "AirValidate.h"
+#include "B3Common.h"
+#include "B3Procedure.h"
+#include "B3TimingScope.h"
+#include "B3ValueInlines.h"
+#include "CCallHelpers.h"
+#include "DisallowMacroScratchRegisterUsage.h"
+#include "LinkBuffer.h"
+#include <wtf/IndexMap.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void prepareForGeneration(Code& code)
+{
+    TimingScope timingScope("Air::prepareForGeneration");
+    
+    // We don't expect the incoming code to have predecessors computed.
+    code.resetReachability();
+    
+    if (shouldValidateIR())
+        validate(code);
+
+    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
+    if (shouldDumpIR(AirMode) && !shouldDumpIRAtEachPhase(AirMode)) {
+        dataLog("Initial air:\n");
+        dataLog(code);
+    }
+
+    lowerMacros(code);
+
+    // This is where we run our optimizations and transformations.
+    // FIXME: Add Air optimizations.
+    // https://bugs.webkit.org/show_bug.cgi?id=150456
+    
+    eliminateDeadCode(code);
+
+    // Register allocation for all the Tmps that do not have a corresponding machine register.
+    // After this phase, every Tmp has a reg.
+    //
+    // For debugging, you can use spillEverything() to put everything to the stack between each Inst.
+    if (Options::airSpillsEverything())
+        spillEverything(code);
+    else
+        iteratedRegisterCoalescing(code);
+
+    if (Options::logAirRegisterPressure()) {
+        dataLog("Register pressure after register allocation:\n");
+        logRegisterPressure(code);
+    }
+
+    // This replaces uses of spill slots with registers or constants if possible. It does this by
+    // minimizing the amount that we perturb the already-chosen register allocation. It may extend
+    // the live ranges of registers though.
+    fixObviousSpills(code);
+
+    lowerAfterRegAlloc(code);
+
+    // Prior to this point, the prologue and epilogue are implicit. This makes them explicit. It also
+    // does things like identify which callee-saves we're using and saves them.
+    handleCalleeSaves(code);
+    
+    if (Options::dumpAirAsJSBeforeAllocateStack()) {
+        dataLog("Dumping Air as JS before allocateStack:\n");
+        dumpAsJS(code, WTF::dataFile());
+        dataLog("Air hash: ", code.jsHash(), "\n");
+    }
+
+    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
+    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
+    // shouldn't have to worry about this very much.
+    allocateStack(code);
+    
+    if (Options::dumpAirAfterAllocateStack()) {
+        dataLog("Dumping Air after allocateStack:\n");
+        dataLog(code);
+        dataLog("Air hash: ", code.jsHash(), "\n");
+    }
+
+    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
+    // phase.
+    simplifyCFG(code);
+
+    // This is needed to satisfy a requirement of B3::StackmapValue.
+    reportUsedRegisters(code);
+
+    // Attempt to remove false dependencies between instructions created by partial register changes.
+    // This must be executed as late as possible as it depends on the instruction order and register
+    // use. We _must_ run this after reportUsedRegisters(), since that kills variable assignments
+    // that seem dead. Luckily, this phase does not change register liveness, so that's OK.
+    fixPartialRegisterStalls(code);
+    
+    // Actually create entrypoints.
+    lowerEntrySwitch(code);
+    
+    // The control flow graph can be simplified further after we have lowered EntrySwitch.
+    simplifyCFG(code);
+
+    // This sorts the basic blocks in Code to achieve an ordering that maximizes the likelihood that a high
+    // frequency successor is also the fall-through target.
+    optimizeBlockOrder(code);
+
+    if (shouldValidateIR())
+        validate(code);
+
+    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
+    // since the final generation is not a phase.
+    if (shouldDumpIR(AirMode)) {
+        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
+        dataLog(code);
+    }
+}
+
+void generate(Code& code, CCallHelpers& jit)
+{
+    TimingScope timingScope("Air::generate");
+
+    DisallowMacroScratchRegisterUsage disallowScratch(jit);
+
+    auto argFor = [&] (const RegisterAtOffset& entry) -> CCallHelpers::Address {
+        return CCallHelpers::Address(GPRInfo::callFrameRegister, entry.offset());
+    };
+    
+    // And now, we generate code.
+    GenerationContext context;
+    context.code = &code;
+    context.blockLabels.resize(code.size());
+    for (BasicBlock* block : code) {
+        if (block)
+            context.blockLabels[block] = Box<CCallHelpers::Label>::create();
+    }
+    IndexMap<BasicBlock, Vector<CCallHelpers::Jump>> blockJumps(code.size());
+
+    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
+        if (context.blockLabels[target]->isSet()) {
+            jump.linkTo(*context.blockLabels[target], &jit);
+            return;
+        }
+
+        blockJumps[target].append(jump);
+    };
+
+    PCToOriginMap& pcToOriginMap = code.proc().pcToOriginMap();
+    auto addItem = [&] (Inst& inst) {
+        if (!inst.origin) {
+            pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), Origin());
+            return;
+        }
+        pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), inst.origin->origin());
+    };
+
+    for (BasicBlock* block : code) {
+        context.currentBlock = block;
+        context.indexInBlock = UINT_MAX;
+        blockJumps[block].link(&jit);
+        CCallHelpers::Label label = jit.label();
+        *context.blockLabels[block] = label;
+
+        if (code.isEntrypoint(block)) {
+            jit.emitFunctionPrologue();
+            if (code.frameSize())
+                jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);
+            
+            for (const RegisterAtOffset& entry : code.calleeSaveRegisters()) {
+                if (entry.reg().isGPR())
+                    jit.storePtr(entry.reg().gpr(), argFor(entry));
+                else
+                    jit.storeDouble(entry.reg().fpr(), argFor(entry));
+            }
+        }
+        
+        ASSERT(block->size() >= 1);
+        for (unsigned i = 0; i < block->size() - 1; ++i) {
+            context.indexInBlock = i;
+            Inst& inst = block->at(i);
+            addItem(inst);
+            CCallHelpers::Jump jump = inst.generate(jit, context);
+            ASSERT_UNUSED(jump, !jump.isSet());
+        }
+
+        context.indexInBlock = block->size() - 1;
+        
+        if (block->last().kind.opcode == Jump
+            && block->successorBlock(0) == code.findNextBlock(block))
+            continue;
+
+        addItem(block->last());
+
+        if (isReturn(block->last().kind.opcode)) {
+            // We currently don't represent the full prologue/epilogue in Air, so we need to
+            // have this override.
+            if (code.frameSize()) {
+                for (const RegisterAtOffset& entry : code.calleeSaveRegisters()) {
+                    if (entry.reg().isGPR())
+                        jit.loadPtr(argFor(entry), entry.reg().gpr());
+                    else
+                        jit.loadDouble(argFor(entry), entry.reg().fpr());
+                }
+                jit.emitFunctionEpilogue();
+            } else
+                jit.emitFunctionEpilogueWithEmptyFrame();
+            jit.ret();
+            addItem(block->last());
+            continue;
+        }
+
+        CCallHelpers::Jump jump = block->last().generate(jit, context);
+        // The jump won't be set for patchpoints. It won't be set for Oops because then it won't have
+        // any successors.
+        if (jump.isSet()) {
+            switch (block->numSuccessors()) {
+            case 1:
+                link(jump, block->successorBlock(0));
+                break;
+            case 2:
+                link(jump, block->successorBlock(0));
+                if (block->successorBlock(1) != code.findNextBlock(block))
+                    link(jit.jump(), block->successorBlock(1));
+                break;
+            default:
+                RELEASE_ASSERT_NOT_REACHED();
+                break;
+            }
+        }
+        addItem(block->last());
+    }
+    
+    context.currentBlock = nullptr;
+    context.indexInBlock = UINT_MAX;
+    
+    Vector<CCallHelpers::Label> entrypointLabels(code.numEntrypoints());
+    for (unsigned i = code.numEntrypoints(); i--;)
+        entrypointLabels[i] = *context.blockLabels[code.entrypoint(i).block()];
+    code.setEntrypointLabels(WTFMove(entrypointLabels));
+
+    pcToOriginMap.appendItem(jit.label(), Origin());
+    // FIXME: Make late paths have Origins: https://bugs.webkit.org/show_bug.cgi?id=153689
+    for (auto& latePath : context.latePaths)
+        latePath->run(jit, context);
+    pcToOriginMap.appendItem(jit.label(), Origin());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirGenerate.h b/b3/air/AirGenerate.h
new file mode 100644
index 0000000..60839be
--- /dev/null
+++ b/b3/air/AirGenerate.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC {
+
+class CCallHelpers;
+
+namespace B3 { namespace Air {
+
+class Code;
+
+// This takes an Air::Code that hasn't had any stack allocation and optionally hasn't had any
+// register allocation and does both of those things.
+JS_EXPORT_PRIVATE void prepareForGeneration(Code&);
+
+// This generates the code using the given CCallHelpers instance. Note that this may call callbacks
+// in the supplied code as it is generating.
+JS_EXPORT_PRIVATE void generate(Code&, CCallHelpers&);
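+
+// A typical client (B3::generate does essentially this) runs the two entry points back to back,
+// emitting into a CCallHelpers that it later links with a LinkBuffer; roughly:
+//
+//     Air::prepareForGeneration(code);
+//     Air::generate(code, jit);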
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirGenerated.cpp b/b3/air/AirGenerated.cpp
new file mode 100644
index 0000000..6dd2304
--- /dev/null
+++ b/b3/air/AirGenerated.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#if ENABLE(B3_JIT)
+
+// This is generated by opcode_generator.rb.
+#include "AirOpcodeGenerated.h"
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirGenerationContext.h b/b3/air/AirGenerationContext.h
new file mode 100644
index 0000000..17e1126
--- /dev/null
+++ b/b3/air/AirGenerationContext.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "CCallHelpers.h"
+#include <wtf/Box.h>
+#include <wtf/IndexMap.h>
+#include <wtf/SharedTask.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+struct GenerationContext {
+    typedef void LatePathFunction(CCallHelpers&, GenerationContext&);
+    typedef SharedTask<LatePathFunction> LatePath;
+
+    Vector<RefPtr<LatePath>> latePaths;
+    IndexMap<BasicBlock, Box<CCallHelpers::Label>> blockLabels;
+    BasicBlock* currentBlock { nullptr };
+    unsigned indexInBlock { UINT_MAX };
+    Code* code { nullptr };
+};
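+
+// Custom generation code (e.g. a Special) can defer work to after the main body of the function
+// has been emitted by appending a late path. A rough sketch (the lambda body is illustrative):
+//
+//     context.latePaths.append(createSharedTask<GenerationContext::LatePathFunction>(
+//         [=] (CCallHelpers& jit, GenerationContext&) {
+//             // emit out-of-line code here
+//         }));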
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirHandleCalleeSaves.cpp b/b3/air/AirHandleCalleeSaves.cpp
new file mode 100644
index 0000000..97cdfa1
--- /dev/null
+++ b/b3/air/AirHandleCalleeSaves.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirHandleCalleeSaves.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void handleCalleeSaves(Code& code)
+{
+    PhaseScope phaseScope(code, "handleCalleeSaves");
+
+    RegisterSet usedCalleeSaves;
+
+    for (BasicBlock* block : code) {
+        for (Inst& inst : *block) {
+            inst.forEachTmpFast(
+                [&] (Tmp& tmp) {
+                    // At first we just record all used regs.
+                    usedCalleeSaves.set(tmp.reg());
+                });
+
+            if (inst.kind.opcode == Patch)
+                usedCalleeSaves.merge(inst.extraClobberedRegs());
+        }
+    }
+
+    // Now we filter to really get the callee saves.
+    usedCalleeSaves.filter(RegisterSet::calleeSaveRegisters());
+    usedCalleeSaves.filter(code.mutableRegs());
+    usedCalleeSaves.exclude(RegisterSet::stackRegisters()); // We don't need to save FP here.
+
+    if (!usedCalleeSaves.numberOfSetRegisters())
+        return;
+
+    code.calleeSaveRegisters() = RegisterAtOffsetList(usedCalleeSaves);
+
+    size_t byteSize = 0;
+    for (const RegisterAtOffset& entry : code.calleeSaveRegisters())
+        byteSize = std::max(static_cast<size_t>(-entry.offset()), byteSize);
+
+    StackSlot* savesArea = code.addStackSlot(byteSize, StackSlotKind::Locked);
+    // This is a bit weird since we could have already pinned a different stack slot to this
+    // area. Also, our runtime does not require us to pin the saves area. Maybe we shouldn't pin it?
+    savesArea->setOffsetFromFP(-byteSize);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirHandleCalleeSaves.h b/b3/air/AirHandleCalleeSaves.h
new file mode 100644
index 0000000..b4b78a3
--- /dev/null
+++ b/b3/air/AirHandleCalleeSaves.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This phase identifies callee-save registers and adds code to save/restore them in the
+// prologue/epilogue to the code. It's a mandatory phase.
+
+// FIXME: It would be cool to make this more interactive with the Air client and also more
+// powerful.
+// We should have shrink wrapping: https://bugs.webkit.org/show_bug.cgi?id=150458
+// We should make this interact with the client: https://bugs.webkit.org/show_bug.cgi?id=150459
+
+void handleCalleeSaves(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirInsertionSet.cpp b/b3/air/AirInsertionSet.cpp
new file mode 100644
index 0000000..452d488
--- /dev/null
+++ b/b3/air/AirInsertionSet.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirInsertionSet.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void InsertionSet::insertInsts(size_t index, Vector<Inst>&& insts)
+{
+    for (Inst& inst : insts)
+        insertInst(index, WTFMove(inst));
+}
+
+void InsertionSet::execute(BasicBlock* block)
+{
+    bubbleSort(m_insertions.begin(), m_insertions.end());
+    executeInsertions(block->m_insts, m_insertions);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirInsertionSet.h b/b3/air/AirInsertionSet.h
new file mode 100644
index 0000000..84a791d
--- /dev/null
+++ b/b3/air/AirInsertionSet.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include <wtf/Insertion.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+class Code;
+
+typedef WTF::Insertion<Inst> Insertion;
+
+class InsertionSet {
+public:
+    InsertionSet(Code& code)
+        : m_code(code)
+    {
+    }
+
+    Code& code() { return m_code; }
+
+    template<typename T>
+    void appendInsertion(T&& insertion)
+    {
+        m_insertions.append(std::forward<T>(insertion));
+    }
+
+    template<typename Inst>
+    void insertInst(size_t index, Inst&& inst)
+    {
+        appendInsertion(Insertion(index, std::forward<Inst>(inst)));
+    }
+
+    template<typename InstVector>
+    void insertInsts(size_t index, const InstVector& insts)
+    {
+        for (const Inst& inst : insts)
+            insertInst(index, inst);
+    }
+    void insertInsts(size_t index, Vector<Inst>&&);
+
+    template<typename... Arguments>
+    void insert(size_t index, Arguments&&... arguments)
+    {
+        insertInst(index, Inst(std::forward<Arguments>(arguments)...));
+    }
+
+    void execute(BasicBlock*);
+
+private:
+    Code& m_code;
+    Vector<Insertion, 8> m_insertions;
+};
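+
+// Typical usage is to buffer insertions while walking a block, then apply them all at once so
+// that indices keep referring to the original instruction positions. A rough sketch, where
+// needsFixupBefore(), src and dst are illustrative stand-ins:
+//
+//     InsertionSet insertionSet(code);
+//     for (unsigned i = 0; i < block->size(); ++i) {
+//         if (needsFixupBefore(block->at(i)))
+//             insertionSet.insert(i, Move, block->at(i).origin, src, dst);
+//     }
+//     insertionSet.execute(block);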
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirInst.cpp b/b3/air/AirInst.cpp
new file mode 100644
index 0000000..defb344
--- /dev/null
+++ b/b3/air/AirInst.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirInst.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirInstInlines.h"
+#include "B3Value.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool Inst::hasArgEffects()
+{
+    bool result = false;
+    forEachArg(
+        [&] (Arg&, Arg::Role role, Arg::Type, Arg::Width) {
+            if (Arg::isAnyDef(role))
+                result = true;
+        });
+    return result;
+}
+
+unsigned Inst::jsHash() const
+{
+    // FIXME: This should do something for flags.
+    // https://bugs.webkit.org/show_bug.cgi?id=162751
+    unsigned result = static_cast(kind.opcode);
+    
+    for (const Arg& arg : args)
+        result += arg.jsHash();
+    
+    return result;
+}
+
+void Inst::dump(PrintStream& out) const
+{
+    out.print(kind, " ", listDump(args));
+    if (origin) {
+        if (args.size())
+            out.print(", ");
+        out.print(*origin);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirInst.h b/b3/air/AirInst.h
new file mode 100644
index 0000000..f38c21d
--- /dev/null
+++ b/b3/air/AirInst.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirKind.h"
+#include "CCallHelpers.h"
+
+namespace JSC {
+
+class CCallHelpers;
+class RegisterSet;
+
+namespace B3 {
+
+class Value;
+
+namespace Air {
+
+struct GenerationContext;
+
+struct Inst {
+public:
+    typedef Vector<Arg, 3> ArgList;
+
+    Inst()
+        : origin(nullptr)
+    {
+    }
+    
+    Inst(Kind kind, Value* origin)
+        : origin(origin)
+        , kind(kind)
+    {
+    }
+    
+    template<typename... Arguments>
+    Inst(Kind kind, Value* origin, Arg arg, Arguments... arguments)
+        : args{ arg, arguments... }
+        , origin(origin)
+        , kind(kind)
+    {
+    }
+
+    Inst(Kind kind, Value* origin, const ArgList& arguments)
+        : args(arguments)
+        , origin(origin)
+        , kind(kind)
+    {
+    }
+
+    Inst(Kind kind, Value* origin, ArgList&& arguments)
+        : args(WTFMove(arguments))
+        , origin(origin)
+        , kind(kind)
+    {
+    }
+
+    explicit operator bool() const { return origin || kind || args.size(); }
+
+    void append() { }
+    
+    template<typename... Arguments>
+    void append(Arg arg, Arguments... arguments)
+    {
+        args.append(arg);
+        append(arguments...);
+    }
+
+    // Note that these functors all avoid using "const" because we want to use them for things that
+    // edit IR. IR is meant to be edited; if you're carrying around a "const Inst&" then you're
+    // probably doing it wrong.
+
+    // This only walks those Tmps that are explicitly mentioned, and it doesn't tell you their role
+    // or type.
+    template<typename Functor>
+    void forEachTmpFast(const Functor& functor)
+    {
+        for (Arg& arg : args)
+            arg.forEachTmpFast(functor);
+    }
+
+    typedef void EachArgCallback(Arg&, Arg::Role, Arg::Type, Arg::Width);
+    
+    // Calls the functor with (arg, role, type, width). This function is auto-generated by
+    // opcode_generator.rb.
+    template<typename Functor>
+    void forEachArg(const Functor&);
+
+    // Calls the functor with (tmp, role, type, width).
+    template<typename Functor>
+    void forEachTmp(const Functor& functor)
+    {
+        forEachArg(
+            [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+                arg.forEachTmp(role, type, width, functor);
+            });
+    }
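+
+    // For example (a sketch mirroring how the register-stall phase uses this), counting the
+    // floating-point registers an instruction defines:
+    //
+    //     unsigned fpDefs = 0;
+    //     inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type, Arg::Width) {
+    //         if (tmp.isFPR() && Arg::isAnyDef(role))
+    //             fpDefs++;
+    //     });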
+
+    // Thing can be either Arg, Tmp, or StackSlot*.
+    template<typename Thing, typename Functor>
+    void forEach(const Functor&);
+
+    // Reports any additional registers clobbered by this operation. Note that for efficiency,
+    // extraClobberedRegs() only works for the Patch opcode.
+    RegisterSet extraClobberedRegs();
+    RegisterSet extraEarlyClobberedRegs();
+
+    // Iterate over all Def's that happen at the end of an instruction. You supply a pair
+    // instructions. The instructions must appear next to each other, in that order, in some basic
+    // block. You can pass null for the first instruction when analyzing what happens at the top of
+    // a basic block. You can pass null for the second instruction when analyzing what happens at the
+    // bottom of a basic block.
+    template
+    static void forEachDef(Inst* prevInst, Inst* nextInst, const Functor&);
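+
+    // For example (sketch; the lambda body is illustrative), a pass can visit the Defs at every
+    // instruction boundary of a block like this:
+    //
+    //     Inst* prev = nullptr;
+    //     for (Inst& inst : *block) {
+    //         Inst::forEachDef<Tmp>(prev, &inst, [&] (Tmp& tmp, Arg::Role, Arg::Type, Arg::Width) {
+    //             // tmp is defined at this boundary
+    //         });
+    //         prev = &inst;
+    //     }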
+
+    // Iterate over all Def's that happen at the end of this instruction, including extra clobbered
+    // registers. Note that Thing can only be Arg or Tmp when you use this functor.
+    template<typename Thing, typename Functor>
+    static void forEachDefWithExtraClobberedRegs(Inst* prevInst, Inst* nextInst, const Functor&);
+
+    // Use this to report which registers are live. This should be done just before codegen. Note
+    // that for efficiency, reportUsedRegisters() only works for the Patch opcode.
+    void reportUsedRegisters(const RegisterSet&);
+
+    // Is this instruction in one of the valid forms right now? This function is auto-generated by
+    // opcode_generator.rb.
+    bool isValidForm();
+
+    // Assuming this instruction is in a valid form right now, will it still be in one of the valid
+    // forms if we put an Addr referencing the stack (or a StackSlot or CallArg, of course) in the
+    // given index? Spilling uses this: it walks the args by index to find Tmps that need spilling;
+    // if it finds one, it calls this to see if it can replace the Arg::Tmp with an Arg::Addr. If it
+    // finds a non-Tmp Arg, then it calls that Arg's forEachTmp to do a replacement that way.
+    //
+    // This function is auto-generated by opcode_generator.rb.
+    bool admitsStack(unsigned argIndex);
+    bool admitsStack(Arg&);
+    
+    // Defined by opcode_generator.rb.
+    bool isTerminal();
+
+    // Returns true if this instruction can have any effects other than control flow or arguments.
+    bool hasNonArgNonControlEffects();
+
+    // Returns true if this instruction can have any effects other than what is implied by arguments.
+    // For example, "Move $42, (%rax)" will return false because the effect of storing to (%rax) is
+    // implied by the second argument.
+    bool hasNonArgEffects();
+
+    // Tells you if this operation has arg effects.
+    bool hasArgEffects();
+    
+    // Tells you if this operation has non-control effects.
+    bool hasNonControlEffects() { return hasNonArgNonControlEffects() || hasArgEffects(); }
+
+    // Generate some code for this instruction. This is, like, literally our backend. If this is the
+    // terminal, it returns the jump that needs to be linked for the "then" case, with the "else"
+    // case being fall-through. This function is auto-generated by opcode_generator.rb.
+    CCallHelpers::Jump generate(CCallHelpers&, GenerationContext&);
+
+    // If the source arguments benefit from being aliased to a destination argument,
+    // this returns the index of the destination argument.
+    // The sources are assumed to be at (index - 1) and (index - 2).
+    // For example,
+    //     Add Tmp1, Tmp2, Tmp3
+    // returns 2 if arguments 0 and 1 benefit from aliasing to Tmp3.
+    std::optional<unsigned> shouldTryAliasingDef();
+    
+    // This computes a hash for comparing this to JSAir's Inst.
+    unsigned jsHash() const;
+
+    void dump(PrintStream&) const;
+
+    ArgList args;
+    Value* origin; // The B3::Value that this originated from.
+    Kind kind;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirInstInlines.h b/b3/air/AirInstInlines.h
new file mode 100644
index 0000000..b753921
--- /dev/null
+++ b/b3/air/AirInstInlines.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include "AirOpcodeUtils.h"
+#include "AirSpecial.h"
+#include "AirStackSlot.h"
+#include "B3Value.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<typename Thing, typename Functor>
+void Inst::forEach(const Functor& functor)
+{
+    forEachArg(
+        [&] (Arg& arg, Arg::Role role, Arg::Type type, Arg::Width width) {
+            arg.forEach(role, type, width, functor);
+        });
+}
+
+inline RegisterSet Inst::extraClobberedRegs()
+{
+    ASSERT(kind.opcode == Patch);
+    return args[0].special()->extraClobberedRegs(*this);
+}
+
+inline RegisterSet Inst::extraEarlyClobberedRegs()
+{
+    ASSERT(kind.opcode == Patch);
+    return args[0].special()->extraEarlyClobberedRegs(*this);
+}
+
+template<typename Thing, typename Functor>
+inline void Inst::forEachDef(Inst* prevInst, Inst* nextInst, const Functor& functor)
+{
+    if (prevInst) {
+        prevInst->forEach(
+            [&] (Thing& thing, Arg::Role role, Arg::Type argType, Arg::Width argWidth) {
+                if (Arg::isLateDef(role))
+                    functor(thing, role, argType, argWidth);
+            });
+    }
+
+    if (nextInst) {
+        nextInst->forEach(
+            [&] (Thing& thing, Arg::Role role, Arg::Type argType, Arg::Width argWidth) {
+                if (Arg::isEarlyDef(role))
+                    functor(thing, role, argType, argWidth);
+            });
+    }
+}
+
+template<typename Thing, typename Functor>
+inline void Inst::forEachDefWithExtraClobberedRegs(
+    Inst* prevInst, Inst* nextInst, const Functor& functor)
+{
+    forEachDef(prevInst, nextInst, functor);
+
+    Arg::Role regDefRole;
+    
+    auto reportReg = [&] (Reg reg) {
+        Arg::Type type = reg.isGPR() ? Arg::GP : Arg::FP;
+        functor(Thing(reg), regDefRole, type, Arg::conservativeWidth(type));
+    };
+
+    if (prevInst && prevInst->kind.opcode == Patch) {
+        regDefRole = Arg::Def;
+        prevInst->extraClobberedRegs().forEach(reportReg);
+    }
+
+    if (nextInst && nextInst->kind.opcode == Patch) {
+        regDefRole = Arg::EarlyDef;
+        nextInst->extraEarlyClobberedRegs().forEach(reportReg);
+    }
+}
+
+inline void Inst::reportUsedRegisters(const RegisterSet& usedRegisters)
+{
+    ASSERT(kind.opcode == Patch);
+    args[0].special()->reportUsedRegisters(*this, usedRegisters);
+}
+
+inline bool Inst::admitsStack(Arg& arg)
+{
+    return admitsStack(&arg - &args[0]);
+}
+
+inline std::optional<unsigned> Inst::shouldTryAliasingDef()
+{
+    if (!isX86())
+        return std::nullopt;
+
+    switch (kind.opcode) {
+    case Add32:
+    case Add64:
+    case And32:
+    case And64:
+    case Mul32:
+    case Mul64:
+    case Or32:
+    case Or64:
+    case Xor32:
+    case Xor64:
+    case AndFloat:
+    case AndDouble:
+    case XorDouble:
+    case XorFloat:
+        if (args.size() == 3)
+            return 2;
+        break;
+    case AddDouble:
+    case AddFloat:
+    case MulDouble:
+    case MulFloat:
+#if CPU(X86) || CPU(X86_64)
+        if (MacroAssembler::supportsAVX())
+            return std::nullopt;
+#endif
+        if (args.size() == 3)
+            return 2;
+        break;
+    case BranchAdd32:
+    case BranchAdd64:
+        if (args.size() == 4)
+            return 3;
+        break;
+    case MoveConditionally32:
+    case MoveConditionally64:
+    case MoveConditionallyTest32:
+    case MoveConditionallyTest64:
+    case MoveConditionallyDouble:
+    case MoveConditionallyFloat:
+    case MoveDoubleConditionally32:
+    case MoveDoubleConditionally64:
+    case MoveDoubleConditionallyTest32:
+    case MoveDoubleConditionallyTest64:
+    case MoveDoubleConditionallyDouble:
+    case MoveDoubleConditionallyFloat:
+        if (args.size() == 6)
+            return 5;
+        break;
+    case Patch:
+        return PatchCustom::shouldTryAliasingDef(*this);
+    default:
+        break;
+    }
+    return std::nullopt;
+}
+
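+// On x86 and x86-64 the variable shift and rotate instructions take their count in %cl, so every
+// shift/rotate opcode below is only "valid" once its count argument has been constrained to
+// %ecx. On the other supported architectures any register may hold the count.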
+inline bool isShiftValid(const Inst& inst)
+{
+#if CPU(X86) || CPU(X86_64)
+    return inst.args[0] == Tmp(X86Registers::ecx);
+#else
+    UNUSED_PARAM(inst);
+    return true;
+#endif
+}
+
+inline bool isLshift32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isLshift64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRshift32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRshift64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isUrshift32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isUrshift64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateRight32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateLeft32Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateRight64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isRotateLeft64Valid(const Inst& inst)
+{
+    return isShiftValid(inst);
+}
+
+inline bool isX86DivHelperValid(const Inst& inst)
+{
+#if CPU(X86) || CPU(X86_64)
+    return inst.args[0] == Tmp(X86Registers::eax)
+        && inst.args[1] == Tmp(X86Registers::edx);
+#else
+    UNUSED_PARAM(inst);
+    return false;
+#endif
+}
+
+inline bool isX86ConvertToDoubleWord32Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86ConvertToQuadWord64Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86Div32Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86UDiv32Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86Div64Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+inline bool isX86UDiv64Valid(const Inst& inst)
+{
+    return isX86DivHelperValid(inst);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirIteratedRegisterCoalescing.cpp b/b3/air/AirIteratedRegisterCoalescing.cpp
new file mode 100644
index 0000000..7e81b5e
--- /dev/null
+++ b/b3/air/AirIteratedRegisterCoalescing.cpp
@@ -0,0 +1,1656 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirIteratedRegisterCoalescing.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPadInterference.h"
+#include "AirPhaseScope.h"
+#include "AirTmpInlines.h"
+#include "AirTmpWidth.h"
+#include "AirUseCounts.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool debug = false;
+bool traceDebug = false;
+bool reportStats = false;
+
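+// This is an implementation of the "iterated register coalescing" graph-coloring allocator
+// described by George and Appel, using Briggs-style conservative coalescing. The interference
+// graph is built over Air Tmps; the precolored nodes are the machine registers.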
+// The AbstractColoringAllocator defines all the code that is independent of the register type
+// and can be shared when allocating registers.
+template<typename IndexType>
+class AbstractColoringAllocator {
+public:
+    AbstractColoringAllocator(const Vector<Reg>& regsInPriorityOrder, IndexType lastPrecoloredRegisterIndex, unsigned tmpArraySize, const HashSet<unsigned>& unspillableTmp)
+        : m_regsInPriorityOrder(regsInPriorityOrder)
+        , m_lastPrecoloredRegisterIndex(lastPrecoloredRegisterIndex)
+        , m_unspillableTmps(unspillableTmp)
+    {
+        for (Reg reg : m_regsInPriorityOrder)
+            m_mutableRegs.set(reg);
+        
+        initializeDegrees(tmpArraySize);
+        
+        m_adjacencyList.resize(tmpArraySize);
+        m_moveList.resize(tmpArraySize);
+        m_coalescedTmps.fill(0, tmpArraySize);
+        m_isOnSelectStack.ensureSize(tmpArraySize);
+    }
+
+protected:
+    IndexType getAlias(IndexType tmpIndex) const
+    {
+        IndexType alias = tmpIndex;
+        while (IndexType nextAlias = m_coalescedTmps[alias])
+            alias = nextAlias;
+        return alias;
+    }
+
+    void addEdge(IndexType a, IndexType b)
+    {
+        if (a == b)
+            return;
+        addEdgeDistinct(a, b);
+    }
+
+    void makeWorkList()
+    {
+        IndexType firstNonRegIndex = m_lastPrecoloredRegisterIndex + 1;
+        for (IndexType i = firstNonRegIndex; i < m_degrees.size(); ++i) {
+            unsigned degree = m_degrees[i];
+            if (degree >= m_regsInPriorityOrder.size())
+                addToSpill(i);
+            else if (!m_moveList[i].isEmpty())
+                m_freezeWorklist.add(i);
+            else
+                m_simplifyWorklist.append(i);
+        }
+    }
+
+    void addToSpill(unsigned toSpill)
+    {
+        if (m_unspillableTmps.contains(toSpill))
+            return;
+
+        m_spillWorklist.add(toSpill);
+    }
+
+    // A low-degree vertex can always be colored: just pick any color not taken by any of its
+    // adjacent vertices.
+    // The "Simplify" phase takes a low-degree vertex out of the interference graph to simplify it.
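+    // This is Kempe's classic argument: if a node has fewer than K neighbors, removing it,
+    // coloring the rest, and putting it back always leaves at least one of the K colors free.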
+    void simplify()
+    {
+        IndexType lastIndex = m_simplifyWorklist.takeLast();
+
+        ASSERT(!m_selectStack.contains(lastIndex));
+        ASSERT(!m_isOnSelectStack.get(lastIndex));
+        m_selectStack.append(lastIndex);
+        m_isOnSelectStack.quickSet(lastIndex);
+
+        forEachAdjacent(lastIndex, [this](IndexType adjacentTmpIndex) {
+            decrementDegree(adjacentTmpIndex);
+        });
+    }
+
+    void freeze()
+    {
+        IndexType victimIndex = m_freezeWorklist.takeAny();
+        ASSERT_WITH_MESSAGE(getAlias(victimIndex) == victimIndex, "coalesce() should not leave aliased Tmp in the worklist.");
+        m_simplifyWorklist.append(victimIndex);
+        freezeMoves(victimIndex);
+    }
+
+    void freezeMoves(IndexType tmpIndex)
+    {
+        forEachNodeMoves(tmpIndex, [this, tmpIndex] (IndexType moveIndex) {
+            if (!m_activeMoves.quickClear(moveIndex))
+                m_worklistMoves.takeMove(moveIndex);
+
+            const MoveOperands& moveOperands = m_coalescingCandidates[moveIndex];
+            IndexType srcTmpIndex = moveOperands.srcIndex;
+            IndexType dstTmpIndex = moveOperands.dstIndex;
+
+            IndexType originalOtherTmp = srcTmpIndex != tmpIndex ? srcTmpIndex : dstTmpIndex;
+            IndexType otherTmpIndex = getAlias(originalOtherTmp);
+            if (m_degrees[otherTmpIndex] < m_regsInPriorityOrder.size() && !isMoveRelated(otherTmpIndex)) {
+                if (m_freezeWorklist.remove(otherTmpIndex))
+                    m_simplifyWorklist.append(otherTmpIndex);
+            }
+        });
+    }
+
+    void coalesce()
+    {
+        unsigned moveIndex = m_worklistMoves.takeLastMove();
+        const MoveOperands& moveOperands = m_coalescingCandidates[moveIndex];
+        IndexType u = getAlias(moveOperands.srcIndex);
+        IndexType v = getAlias(moveOperands.dstIndex);
+
+        if (isPrecolored(v))
+            std::swap(u, v);
+
+        if (traceDebug)
+            dataLog("Coalescing move at index ", moveIndex, " u = ", u, " v = ", v, "\n");
+
+        if (u == v) {
+            addWorkList(u);
+
+            if (traceDebug)
+                dataLog("    Coalesced\n");
+        } else if (isPrecolored(v)
+            || m_interferenceEdges.contains(InterferenceEdge(u, v))
+            || (u == m_framePointerIndex && m_interferesWithFramePointer.quickGet(v))) {
+            addWorkList(u);
+            addWorkList(v);
+
+            if (traceDebug)
+                dataLog("    Constrained\n");
+        } else if (canBeSafelyCoalesced(u, v)) {
+            combine(u, v);
+            addWorkList(u);
+            m_hasCoalescedNonTrivialMove = true;
+
+            if (traceDebug)
+                dataLog("    Safe Coalescing\n");
+        } else {
+            m_activeMoves.quickSet(moveIndex);
+
+            if (traceDebug)
+                dataLog("    Failed coalescing, added to active moves.\n");
+        }
+    }
+
+    void assignColors()
+    {
+        ASSERT(m_simplifyWorklist.isEmpty());
+        ASSERT(m_worklistMoves.isEmpty());
+        ASSERT(m_freezeWorklist.isEmpty());
+        ASSERT(m_spillWorklist.isEmpty());
+
+        // Reclaim as much memory as possible.
+        m_interferenceEdges.clear();
+        m_degrees.clear();
+        m_moveList.clear();
+        m_worklistMoves.clear();
+        m_simplifyWorklist.clear();
+        m_spillWorklist.clear();
+        m_freezeWorklist.clear();
+
+        // Try to color the Tmp on the stack.
+        m_coloredTmp.resize(m_adjacencyList.size());
+
+        while (!m_selectStack.isEmpty()) {
+            unsigned tmpIndex = m_selectStack.takeLast();
+            ASSERT(!isPrecolored(tmpIndex));
+            ASSERT(!m_coloredTmp[tmpIndex]);
+
+            RegisterSet coloredRegisters;
+            for (IndexType adjacentTmpIndex : m_adjacencyList[tmpIndex]) {
+                IndexType aliasTmpIndex = getAlias(adjacentTmpIndex);
+                Reg reg = m_coloredTmp[aliasTmpIndex];
+
+                ASSERT(!isPrecolored(aliasTmpIndex) || (isPrecolored(aliasTmpIndex) && reg));
+
+                if (reg)
+                    coloredRegisters.set(reg);
+            }
+
+            bool colorAssigned = false;
+            for (Reg reg : m_regsInPriorityOrder) {
+                if (!coloredRegisters.get(reg)) {
+                    m_coloredTmp[tmpIndex] = reg;
+                    colorAssigned = true;
+                    break;
+                }
+            }
+
+            if (!colorAssigned)
+                m_spilledTmps.append(tmpIndex);
+        }
+        m_selectStack.clear();
+
+        if (m_spilledTmps.isEmpty())
+            m_coalescedTmpsAtSpill.clear();
+        else
+            m_coloredTmp.clear();
+    }
+
+private:
+    void initializeDegrees(unsigned tmpArraySize)
+    {
+        m_degrees.resize(tmpArraySize);
+
+        // All precolored registers have an "infinite" degree.
+        unsigned firstNonRegIndex = m_lastPrecoloredRegisterIndex + 1;
+        for (unsigned i = 0; i < firstNonRegIndex; ++i)
+            m_degrees[i] = std::numeric_limits<unsigned>::max();
+
+        memset(m_degrees.data() + firstNonRegIndex, 0, (tmpArraySize - firstNonRegIndex) * sizeof(unsigned));
+    }
+
+    void addEdgeDistinct(IndexType a, IndexType b)
+    {
+        ASSERT(a != b);
+        if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+            if (!isPrecolored(a)) {
+                ASSERT(!m_adjacencyList[a].contains(b));
+                m_adjacencyList[a].append(b);
+                m_degrees[a]++;
+            }
+
+            if (!isPrecolored(b)) {
+                ASSERT(!m_adjacencyList[b].contains(a));
+                m_adjacencyList[b].append(a);
+                m_degrees[b]++;
+            }
+        }
+    }
+
+    void decrementDegree(IndexType tmpIndex)
+    {
+        ASSERT(m_degrees[tmpIndex]);
+
+        unsigned oldDegree = m_degrees[tmpIndex]--;
+        if (oldDegree == m_regsInPriorityOrder.size()) {
+            enableMovesOnValueAndAdjacents(tmpIndex);
+            m_spillWorklist.remove(tmpIndex);
+            if (isMoveRelated(tmpIndex))
+                m_freezeWorklist.add(tmpIndex);
+            else
+                m_simplifyWorklist.append(tmpIndex);
+        }
+    }
+
+
+    bool addEdgeDistinctWithoutDegreeChange(IndexType a, IndexType b)
+    {
+        ASSERT(a != b);
+        if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+            if (!isPrecolored(a)) {
+                ASSERT(!m_adjacencyList[a].contains(b));
+                m_adjacencyList[a].append(b);
+            }
+
+            if (!isPrecolored(b)) {
+                ASSERT(!m_adjacencyList[b].contains(a));
+                m_adjacencyList[b].append(a);
+            }
+            return true;
+        }
+        return false;
+    }
+
+    bool isMoveRelated(IndexType tmpIndex)
+    {
+        for (unsigned moveIndex : m_moveList[tmpIndex]) {
+            if (m_activeMoves.quickGet(moveIndex) || m_worklistMoves.contains(moveIndex))
+                return true;
+        }
+        return false;
+    }
+
+    template<typename Function>
+    void forEachAdjacent(IndexType tmpIndex, Function function)
+    {
+        for (IndexType adjacentTmpIndex : m_adjacencyList[tmpIndex]) {
+            if (!hasBeenSimplified(adjacentTmpIndex))
+                function(adjacentTmpIndex);
+        }
+    }
+
+    bool hasBeenSimplified(IndexType tmpIndex)
+    {
+        return m_isOnSelectStack.quickGet(tmpIndex) || !!m_coalescedTmps[tmpIndex];
+    }
+
+    template<typename Function>
+    void forEachNodeMoves(IndexType tmpIndex, Function function)
+    {
+        for (unsigned moveIndex : m_moveList[tmpIndex]) {
+            if (m_activeMoves.quickGet(moveIndex) || m_worklistMoves.contains(moveIndex))
+                function(moveIndex);
+        }
+    }
+
+    void enableMovesOnValue(IndexType tmpIndex)
+    {
+        for (unsigned moveIndex : m_moveList[tmpIndex]) {
+            if (m_activeMoves.quickClear(moveIndex))
+                m_worklistMoves.returnMove(moveIndex);
+        }
+    }
+
+    void enableMovesOnValueAndAdjacents(IndexType tmpIndex)
+    {
+        enableMovesOnValue(tmpIndex);
+
+        forEachAdjacent(tmpIndex, [this] (IndexType adjacentTmpIndex) {
+            enableMovesOnValue(adjacentTmpIndex);
+        });
+    }
+
+    bool isPrecolored(IndexType tmpIndex)
+    {
+        return tmpIndex <= m_lastPrecoloredRegisterIndex;
+    }
+
+    void addWorkList(IndexType tmpIndex)
+    {
+        if (!isPrecolored(tmpIndex) && m_degrees[tmpIndex] < m_regsInPriorityOrder.size() && !isMoveRelated(tmpIndex)) {
+            m_freezeWorklist.remove(tmpIndex);
+            m_simplifyWorklist.append(tmpIndex);
+        }
+    }
+
+    void combine(IndexType u, IndexType v)
+    {
+        if (!m_freezeWorklist.remove(v))
+            m_spillWorklist.remove(v);
+
+        ASSERT(!m_coalescedTmps[v]);
+        m_coalescedTmps[v] = u;
+
+        auto& vMoves = m_moveList[v];
+        m_moveList[u].add(vMoves.begin(), vMoves.end());
+
+        forEachAdjacent(v, [this, u] (IndexType adjacentTmpIndex) {
+            if (addEdgeDistinctWithoutDegreeChange(adjacentTmpIndex, u)) {
+                // If we added a new edge between the adjacentTmp and u, it replaces the edge
+                // that existed with v.
+                // The degree of adjacentTmp remains the same since the edge just changed from v to u.
+                // All we need to do is update the degree of u.
+                if (!isPrecolored(u))
+                    m_degrees[u]++;
+            } else {
+                // If we already had an edge between the adjacentTmp and u, the degree of u
+                // is already correct. The degree of the adjacentTmp decreases since the edge
+                // with v is no longer relevant (we can think of it as merged with the edge with u).
+                decrementDegree(adjacentTmpIndex);
+            }
+        });
+
+        if (m_framePointerIndex && m_interferesWithFramePointer.quickGet(v))
+            m_interferesWithFramePointer.quickSet(u);
+
+        if (m_degrees[u] >= m_regsInPriorityOrder.size() && m_freezeWorklist.remove(u))
+            addToSpill(u);
+    }
+
+    bool canBeSafelyCoalesced(IndexType u, IndexType v)
+    {
+        ASSERT(!isPrecolored(v));
+        if (isPrecolored(u))
+            return precoloredCoalescingHeuristic(u, v);
+        return conservativeHeuristic(u, v);
+    }
+
+    bool conservativeHeuristic(IndexType u, IndexType v)
+    {
+        // This is using Briggs' conservative coalescing rule:
+        // If the number of combined adjacent nodes with a degree >= K is less than K,
+        // it is safe to combine the two nodes. The reason is that the combined node then has
+        // fewer than K high-degree adjacents, so it can still be simplified and a color will
+        // remain available for it.
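+        // For example, with K = 4 registers, if u and v together touch five neighbors but only
+        // two of them have degree >= 4, the merged node behaves like a degree-2 node once the
+        // low-degree neighbors are simplified away, so merging u and v is safe.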
+        ASSERT(u != v);
+        ASSERT(!isPrecolored(u));
+        ASSERT(!isPrecolored(v));
+
+        const auto& adjacentsOfU = m_adjacencyList[u];
+        const auto& adjacentsOfV = m_adjacencyList[v];
+
+        if (adjacentsOfU.size() + adjacentsOfV.size() < m_regsInPriorityOrder.size()) {
+            // Shortcut: if the total number of adjacents is less than the number of registers, the condition is always met.
+            return true;
+        }
+
+        HashSet<IndexType> highOrderAdjacents;
+
+        for (IndexType adjacentTmpIndex : adjacentsOfU) {
+            ASSERT(adjacentTmpIndex != v);
+            ASSERT(adjacentTmpIndex != u);
+            if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()) {
+                auto addResult = highOrderAdjacents.add(adjacentTmpIndex);
+                if (addResult.isNewEntry && highOrderAdjacents.size() >= m_regsInPriorityOrder.size())
+                    return false;
+            }
+        }
+        for (IndexType adjacentTmpIndex : adjacentsOfV) {
+            ASSERT(adjacentTmpIndex != u);
+            ASSERT(adjacentTmpIndex != v);
+            if (!hasBeenSimplified(adjacentTmpIndex) && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()) {
+                auto addResult = highOrderAdjacents.add(adjacentTmpIndex);
+                if (addResult.isNewEntry && highOrderAdjacents.size() >= m_regsInPriorityOrder.size())
+                    return false;
+            }
+        }
+
+        ASSERT(highOrderAdjacents.size() < m_regsInPriorityOrder.size());
+        return true;
+    }
+
+    bool precoloredCoalescingHeuristic(IndexType u, IndexType v)
+    {
+        if (traceDebug)
+            dataLog("    Checking precoloredCoalescingHeuristic\n");
+        ASSERT(isPrecolored(u));
+        ASSERT(!isPrecolored(v));
+        
+        // If u is a pinned register then it's always safe to coalesce. Note that when we call this,
+        // we have already proved that there is no interference between u and v.
+        if (!m_mutableRegs.get(m_coloredTmp[u]))
+            return true;
+
+        // If any adjacent of the non-colored node is not an adjacent of the colored node AND has a degree >= K,
+        // there is a risk that this node needs to have the same color as our precolored node. If we coalesce
+        // such a move, we may create an uncolorable graph.
+        const auto& adjacentsOfV = m_adjacencyList[v];
+        for (unsigned adjacentTmpIndex : adjacentsOfV) {
+            if (!isPrecolored(adjacentTmpIndex)
+                && !hasBeenSimplified(adjacentTmpIndex)
+                && m_degrees[adjacentTmpIndex] >= m_regsInPriorityOrder.size()
+                && !m_interferenceEdges.contains(InterferenceEdge(u, adjacentTmpIndex)))
+                return false;
+        }
+        return true;
+    }
+
+protected:
+#if PLATFORM(COCOA)
+#pragma mark -
+#endif
+
+    // Interference edges are not directed. An edge between any two Tmps is represented
+    // by the concatenated values of the smallest Tmp followed by the bigger Tmp.
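+    // For example, the unordered edge between Tmp indices 3 and 17 is always stored as the
+    // 64-bit value (3 << 32) | 17, whichever operand was passed first, so lookups are
+    // order-independent.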
+    class InterferenceEdge {
+    public:
+        InterferenceEdge()
+        {
+        }
+
+        InterferenceEdge(IndexType a, IndexType b)
+        {
+            ASSERT(a);
+            ASSERT(b);
+            ASSERT_WITH_MESSAGE(a != b, "A Tmp can never interfere with itself. Doing so would force it to be the superposition of two registers.");
+
+            if (b < a)
+                std::swap(a, b);
+            m_value = static_cast<uint64_t>(a) << 32 | b;
+        }
+
+        InterferenceEdge(WTF::HashTableDeletedValueType)
+            : m_value(std::numeric_limits<uint64_t>::max())
+        {
+        }
+
+        IndexType first() const
+        {
+            return m_value >> 32 & 0xffffffff;
+        }
+
+        IndexType second() const
+        {
+            return m_value & 0xffffffff;
+        }
+
+        bool operator==(const InterferenceEdge other) const
+        {
+            return m_value == other.m_value;
+        }
+
+        bool isHashTableDeletedValue() const
+        {
+            return *this == InterferenceEdge(WTF::HashTableDeletedValue);
+        }
+
+        unsigned hash() const
+        {
+            return WTF::IntHash<uint64_t>::hash(m_value);
+        }
+
+        void dump(PrintStream& out) const
+        {
+            out.print(first(), "<=>", second());
+        }
+
+    private:
+        uint64_t m_value { 0 };
+    };
+
+    struct InterferenceEdgeHash {
+        static unsigned hash(const InterferenceEdge& key) { return key.hash(); }
+        static bool equal(const InterferenceEdge& a, const InterferenceEdge& b) { return a == b; }
+        static const bool safeToCompareToEmptyOrDeleted = true;
+    };
+    typedef SimpleClassHashTraits<InterferenceEdge> InterferenceEdgeHashTraits;
+
+    const Vector<Reg>& m_regsInPriorityOrder;
+    RegisterSet m_mutableRegs;
+    IndexType m_lastPrecoloredRegisterIndex { 0 };
+
+    // The interference graph.
+    HashSet<InterferenceEdge, InterferenceEdgeHash, InterferenceEdgeHashTraits> m_interferenceEdges;
+    Vector<Vector<IndexType, 0, UnsafeVectorOverflow>, 0, UnsafeVectorOverflow> m_adjacencyList;
+    Vector<unsigned, 0, UnsafeVectorOverflow> m_degrees;
+
+    // Instead of keeping track of the move instructions, we just keep their operands around and use the index
+    // in the vector as the "identifier" for the move.
+    struct MoveOperands {
+        IndexType srcIndex;
+        IndexType dstIndex;
+    };
+    Vector<MoveOperands, 0, UnsafeVectorOverflow> m_coalescingCandidates;
+
+    // List of every move instruction associated with a Tmp.
+    Vector<HashSet<unsigned, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>>> m_moveList;
+
+    // Colors.
+    Vector<Reg, 0, UnsafeVectorOverflow> m_coloredTmp;
+    Vector<IndexType> m_spilledTmps;
+
+    // Values that have been coalesced with an other value.
+    Vector<IndexType> m_coalescedTmps;
+
+    // The stack of Tmp removed from the graph and ready for coloring.
+    BitVector m_isOnSelectStack;
+    Vector<IndexType> m_selectStack;
+
+    IndexType m_framePointerIndex { 0 };
+    BitVector m_interferesWithFramePointer;
+
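+    // OrderedMoveSet keeps the coalescing candidates in two tiers: moves discovered from real
+    // Move instructions, and low-priority moves that only exist as aliasing hints for two-operand
+    // x86 forms. takeLastMove() drains the regular tier first, so the hints are only attempted
+    // once every genuine Move has been considered.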
+    struct OrderedMoveSet {
+        unsigned addMove()
+        {
+            ASSERT(m_lowPriorityMoveList.isEmpty());
+            ASSERT(!m_firstLowPriorityMoveIndex);
+
+            unsigned nextIndex = m_positionInMoveList.size();
+            unsigned position = m_moveList.size();
+            m_moveList.append(nextIndex);
+            m_positionInMoveList.append(position);
+            return nextIndex;
+        }
+
+        void startAddingLowPriorityMoves()
+        {
+            ASSERT(m_lowPriorityMoveList.isEmpty());
+            m_firstLowPriorityMoveIndex = m_moveList.size();
+        }
+
+        unsigned addLowPriorityMove()
+        {
+            ASSERT(m_firstLowPriorityMoveIndex == m_moveList.size());
+
+            unsigned nextIndex = m_positionInMoveList.size();
+            unsigned position = m_lowPriorityMoveList.size();
+            m_lowPriorityMoveList.append(nextIndex);
+            m_positionInMoveList.append(position);
+
+            ASSERT(nextIndex >= m_firstLowPriorityMoveIndex);
+
+            return nextIndex;
+        }
+
+        bool isEmpty() const
+        {
+            return m_moveList.isEmpty() && m_lowPriorityMoveList.isEmpty();
+        }
+
+        bool contains(unsigned index)
+        {
+            return m_positionInMoveList[index] != std::numeric_limits<unsigned>::max();
+        }
+
+        void takeMove(unsigned moveIndex)
+        {
+            unsigned positionInMoveList = m_positionInMoveList[moveIndex];
+            if (positionInMoveList == std::numeric_limits<unsigned>::max())
+                return;
+
+            if (moveIndex < m_firstLowPriorityMoveIndex) {
+                ASSERT(m_moveList[positionInMoveList] == moveIndex);
+                unsigned lastIndex = m_moveList.last();
+                m_positionInMoveList[lastIndex] = positionInMoveList;
+                m_moveList[positionInMoveList] = lastIndex;
+                m_moveList.removeLast();
+            } else {
+                ASSERT(m_lowPriorityMoveList[positionInMoveList] == moveIndex);
+                unsigned lastIndex = m_lowPriorityMoveList.last();
+                m_positionInMoveList[lastIndex] = positionInMoveList;
+                m_lowPriorityMoveList[positionInMoveList] = lastIndex;
+                m_lowPriorityMoveList.removeLast();
+            }
+
+            m_positionInMoveList[moveIndex] = std::numeric_limits<unsigned>::max();
+
+            ASSERT(!contains(moveIndex));
+        }
+
+        unsigned takeLastMove()
+        {
+            ASSERT(!isEmpty());
+
+            unsigned lastIndex;
+            if (!m_moveList.isEmpty()) {
+                lastIndex = m_moveList.takeLast();
+                ASSERT(m_positionInMoveList[lastIndex] == m_moveList.size());
+            } else {
+                lastIndex = m_lowPriorityMoveList.takeLast();
+                ASSERT(m_positionInMoveList[lastIndex] == m_lowPriorityMoveList.size());
+            }
+            m_positionInMoveList[lastIndex] = std::numeric_limits<unsigned>::max();
+
+            ASSERT(!contains(lastIndex));
+            return lastIndex;
+        }
+
+        void returnMove(unsigned index)
+        {
+            // This assertion is a bit strict but that is how the move list should be used. The only kind of moves that can
+            // return to the list are the ones that we previously failed to coalesce with the conservative heuristics.
+            // Values should not be added back if they were never taken out when attempting coalescing.
+            ASSERT(!contains(index));
+
+            if (index < m_firstLowPriorityMoveIndex) {
+                unsigned position = m_moveList.size();
+                m_moveList.append(index);
+                m_positionInMoveList[index] = position;
+            } else {
+                unsigned position = m_lowPriorityMoveList.size();
+                m_lowPriorityMoveList.append(index);
+                m_positionInMoveList[index] = position;
+            }
+
+            ASSERT(contains(index));
+        }
+
+        void clear()
+        {
+            m_positionInMoveList.clear();
+            m_moveList.clear();
+            m_lowPriorityMoveList.clear();
+        }
+
+    private:
+        Vector<unsigned, 0, UnsafeVectorOverflow> m_positionInMoveList;
+        Vector<unsigned, 0, UnsafeVectorOverflow> m_moveList;
+        Vector<unsigned, 0, UnsafeVectorOverflow> m_lowPriorityMoveList;
+        unsigned m_firstLowPriorityMoveIndex { 0 };
+    };
+
+    // Work lists.
+    // Set of "move" enabled for possible coalescing.
+    OrderedMoveSet m_worklistMoves;
+    // Set of "move" not yet ready for coalescing.
+    BitVector m_activeMoves;
+    // Low-degree, non-Move related.
+    Vector<IndexType> m_simplifyWorklist;
+    // High-degree Tmp.
+    HashSet<IndexType> m_spillWorklist;
+    // Low-degree, Move related.
+    HashSet<IndexType> m_freezeWorklist;
+
+    bool m_hasSelectedSpill { false };
+    bool m_hasCoalescedNonTrivialMove { false };
+
+    // The mapping of Tmp to their alias for Moves that are always coalescing regardless of spilling.
+    Vector<IndexType> m_coalescedTmpsAtSpill;
+    
+    const HashSet<unsigned>& m_unspillableTmps;
+};
+
+// This performs all the tasks that are specific to a certain register type.
+template<Arg::Type type>
+class ColoringAllocator : public AbstractColoringAllocator<unsigned> {
+public:
+    ColoringAllocator(Code& code, TmpWidth& tmpWidth, const UseCounts<Tmp>& useCounts, const HashSet<unsigned>& unspillableTmp)
+        : AbstractColoringAllocator<unsigned>(code.regsInPriorityOrder(type), AbsoluteTmpMapper<type>::lastMachineRegisterIndex(), tmpArraySize(code), unspillableTmp)
+        , m_code(code)
+        , m_tmpWidth(tmpWidth)
+        , m_useCounts(useCounts)
+    {
+        if (type == Arg::GP) {
+            m_framePointerIndex = AbsoluteTmpMapper::absoluteIndex(Tmp(MacroAssembler::framePointerRegister));
+            m_interferesWithFramePointer.ensureSize(tmpArraySize(code));
+        }
+
+        initializePrecoloredTmp();
+        build();
+        allocate();
+    }
+
+    Tmp getAlias(Tmp tmp) const
+    {
+        return AbsoluteTmpMapper::tmpFromAbsoluteIndex(getAlias(AbsoluteTmpMapper::absoluteIndex(tmp)));
+    }
+
+    // This tells you if a Move will be coalescable if the src and dst end up matching. This method
+    // relies on an analysis that is invalidated by register allocation, so it's only meaningful to
+    // call this *before* replacing the Tmp's in this Inst with registers or spill slots.
+    bool mayBeCoalescable(const Inst& inst) const
+    {
+        return mayBeCoalescableImpl(inst, &m_tmpWidth);
+    }
+
+    bool isUselessMove(const Inst& inst) const
+    {
+        return mayBeCoalescableImpl(inst, nullptr) && inst.args[0].tmp() == inst.args[1].tmp();
+    }
+
+    Tmp getAliasWhenSpilling(Tmp tmp) const
+    {
+        ASSERT_WITH_MESSAGE(!m_spilledTmps.isEmpty(), "This function is only valid for coalescing during spilling.");
+
+        if (m_coalescedTmpsAtSpill.isEmpty())
+            return tmp;
+
+        unsigned aliasIndex = AbsoluteTmpMapper::absoluteIndex(tmp);
+        while (unsigned nextAliasIndex = m_coalescedTmpsAtSpill[aliasIndex])
+            aliasIndex = nextAliasIndex;
+
+        Tmp alias = AbsoluteTmpMapper::tmpFromAbsoluteIndex(aliasIndex);
+
+        ASSERT_WITH_MESSAGE(!m_spilledTmps.contains(aliasIndex) || alias == tmp, "The aliases at spill should always be colorable. Something went horribly wrong.");
+
+        return alias;
+    }
+
+    template<typename IndexIterator>
+    class IndexToTmpIteratorAdaptor {
+    public:
+        IndexToTmpIteratorAdaptor(IndexIterator&& indexIterator)
+            : m_indexIterator(WTFMove(indexIterator))
+        {
+        }
+
+        Tmp operator*() const { return AbsoluteTmpMapper::tmpFromAbsoluteIndex(*m_indexIterator); }
+        IndexToTmpIteratorAdaptor& operator++() { ++m_indexIterator; return *this; }
+
+        bool operator==(const IndexToTmpIteratorAdaptor& other) const
+        {
+            return m_indexIterator == other.m_indexIterator;
+        }
+
+        bool operator!=(const IndexToTmpIteratorAdaptor& other) const
+        {
+            return !(*this == other);
+        }
+
+    private:
+        IndexIterator m_indexIterator;
+    };
+
+    template<typename Collection>
+    class IndexToTmpIterableAdaptor {
+    public:
+        IndexToTmpIterableAdaptor(const Collection& collection)
+            : m_collection(collection)
+        {
+        }
+
+        IndexToTmpIteratorAdaptor<typename Collection::const_iterator> begin() const
+        {
+            return m_collection.begin();
+        }
+
+        IndexToTmpIteratorAdaptor<typename Collection::const_iterator> end() const
+        {
+            return m_collection.end();
+        }
+
+    private:
+        const Collection& m_collection;
+    };
+
+    IndexToTmpIterableAdaptor<Vector<unsigned>> spilledTmps() const { return m_spilledTmps; }
+
+    bool requiresSpilling() const { return !m_spilledTmps.isEmpty(); }
+
+    Reg allocatedReg(Tmp tmp) const
+    {
+        ASSERT(!tmp.isReg());
+        ASSERT(m_coloredTmp.size());
+        ASSERT(tmp.isGP() == (type == Arg::GP));
+
+        Reg reg = m_coloredTmp[AbsoluteTmpMapper::absoluteIndex(tmp)];
+        if (!reg) {
+            dataLog("FATAL: No color for ", tmp, "\n");
+            dataLog("Code:\n");
+            dataLog(m_code);
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+        return reg;
+    }
+
+private:
+    static unsigned tmpArraySize(Code& code)
+    {
+        unsigned numTmps = code.numTmps(type);
+        return AbsoluteTmpMapper::absoluteIndex(numTmps);
+    }
+
+    void initializePrecoloredTmp()
+    {
+        m_coloredTmp.resize(m_lastPrecoloredRegisterIndex + 1);
+        for (unsigned i = 1; i <= m_lastPrecoloredRegisterIndex; ++i) {
+            Tmp tmp = AbsoluteTmpMapper::tmpFromAbsoluteIndex(i);
+            ASSERT(tmp.isReg());
+            m_coloredTmp[i] = tmp.reg();
+        }
+    }
+
+    bool mayBeCoalesced(Arg left, Arg right)
+    {
+        if (!left.isTmp() || !right.isTmp())
+            return false;
+
+        Tmp leftTmp = left.tmp();
+        Tmp rightTmp = right.tmp();
+
+        if (leftTmp == rightTmp)
+            return false;
+
+        if (leftTmp.isGP() != (type == Arg::GP) || rightTmp.isGP() != (type == Arg::GP))
+            return false;
+
+        unsigned leftIndex = AbsoluteTmpMapper::absoluteIndex(leftTmp);
+        unsigned rightIndex = AbsoluteTmpMapper::absoluteIndex(rightTmp);
+
+        return !m_interferenceEdges.contains(InterferenceEdge(leftIndex, rightIndex));
+    }
+
+    void addToLowPriorityCoalescingCandidates(Arg left, Arg right)
+    {
+        ASSERT(mayBeCoalesced(left, right));
+        Tmp leftTmp = left.tmp();
+        Tmp rightTmp = right.tmp();
+
+        unsigned leftIndex = AbsoluteTmpMapper::absoluteIndex(leftTmp);
+        unsigned rightIndex = AbsoluteTmpMapper::absoluteIndex(rightTmp);
+
+        unsigned nextMoveIndex = m_coalescingCandidates.size();
+        m_coalescingCandidates.append({ leftIndex, rightIndex });
+
+        unsigned newIndexInWorklist = m_worklistMoves.addLowPriorityMove();
+        ASSERT_UNUSED(newIndexInWorklist, newIndexInWorklist == nextMoveIndex);
+
+        ASSERT(nextMoveIndex <= m_activeMoves.size());
+        m_activeMoves.ensureSize(nextMoveIndex + 1);
+
+        m_moveList[leftIndex].add(nextMoveIndex);
+        m_moveList[rightIndex].add(nextMoveIndex);
+    }
+
+    void build()
+    {
+        TmpLiveness<type> liveness(m_code);
+        for (BasicBlock* block : m_code) {
+            typename TmpLiveness<type>::LocalCalc localCalc(liveness, block);
+            for (unsigned instIndex = block->size(); instIndex--;) {
+                Inst& inst = block->at(instIndex);
+                Inst* nextInst = block->get(instIndex + 1);
+                build(&inst, nextInst, localCalc);
+                localCalc.execute(instIndex);
+            }
+            build(nullptr, &block->at(0), localCalc);
+        }
+        buildLowPriorityMoveList();
+    }
+
+    void build(Inst* prevInst, Inst* nextInst, const typename TmpLiveness<type>::LocalCalc& localCalc)
+    {
+        if (traceDebug)
+            dataLog("Building between ", pointerDump(prevInst), " and ", pointerDump(nextInst), ":\n");
+        Inst::forEachDefWithExtraClobberedRegs(
+            prevInst, nextInst,
+            [&] (const Tmp& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+                if (argType != type)
+                    return;
+                
+                // All the Def()s interfere with each other and with all the extra clobbered Tmps.
+                // We should not use forEachDefWithExtraClobberedRegs() here since colored Tmps
+                // do not need interference edges in our implementation.
+                Inst::forEachDef(
+                    prevInst, nextInst,
+                    [&] (Tmp& otherArg, Arg::Role, Arg::Type argType, Arg::Width) {
+                        if (argType != type)
+                            return;
+                        
+                        if (traceDebug)
+                            dataLog("    Adding def-def edge: ", arg, ", ", otherArg, "\n");
+                        this->addEdge(arg, otherArg);
+                    });
+            });
+
+        if (prevInst && mayBeCoalescable(*prevInst)) {
+            // We do not want the Use() of this move to interfere with the Def(), even if it is live
+            // after the Move. If we were to add the interference edge, it would be impossible to
+            // coalesce the Move even if the two Tmp never interfere anywhere.
+            Tmp defTmp;
+            Tmp useTmp;
+            prevInst->forEachTmp([&defTmp, &useTmp] (Tmp& argTmp, Arg::Role role, Arg::Type, Arg::Width) {
+                if (Arg::isLateDef(role))
+                    defTmp = argTmp;
+                else {
+                    ASSERT(Arg::isEarlyUse(role));
+                    useTmp = argTmp;
+                }
+            });
+            ASSERT(defTmp);
+            ASSERT(useTmp);
+
+            unsigned nextMoveIndex = m_coalescingCandidates.size();
+            m_coalescingCandidates.append({ AbsoluteTmpMapper::absoluteIndex(useTmp), AbsoluteTmpMapper::absoluteIndex(defTmp) });
+
+            unsigned newIndexInWorklist = m_worklistMoves.addMove();
+            ASSERT_UNUSED(newIndexInWorklist, newIndexInWorklist == nextMoveIndex);
+
+            ASSERT(nextMoveIndex <= m_activeMoves.size());
+            m_activeMoves.ensureSize(nextMoveIndex + 1);
+
+            for (const Arg& arg : prevInst->args) {
+                auto& list = m_moveList[AbsoluteTmpMapper::absoluteIndex(arg.tmp())];
+                list.add(nextMoveIndex);
+            }
+
+            for (const Tmp& liveTmp : localCalc.live()) {
+                if (liveTmp != useTmp) {
+                    if (traceDebug)
+                        dataLog("    Adding def-live for coalescable: ", defTmp, ", ", liveTmp, "\n");
+                    addEdge(defTmp, liveTmp);
+                }
+            }
+
+            // The next instruction could have early clobbers or early def's. We need to consider
+            // those now.
+            addEdges(nullptr, nextInst, localCalc.live());
+        } else
+            addEdges(prevInst, nextInst, localCalc.live());
+    }
+
+    void buildLowPriorityMoveList()
+    {
+        if (!isX86())
+            return;
+
+        m_worklistMoves.startAddingLowPriorityMoves();
+        for (BasicBlock* block : m_code) {
+            for (Inst& inst : *block) {
+                if (std::optional<unsigned> defArgIndex = inst.shouldTryAliasingDef()) {
+                    Arg op1 = inst.args[*defArgIndex - 2];
+                    Arg op2 = inst.args[*defArgIndex - 1];
+                    Arg dest = inst.args[*defArgIndex];
+
+                    if (op1 == dest || op2 == dest)
+                        continue;
+
+                    if (mayBeCoalesced(op1, dest))
+                        addToLowPriorityCoalescingCandidates(op1, dest);
+                    if (op1 != op2 && mayBeCoalesced(op2, dest))
+                        addToLowPriorityCoalescingCandidates(op2, dest);
+                }
+            }
+        }
+    }
+
+    void addEdges(Inst* prevInst, Inst* nextInst, typename TmpLiveness<type>::LocalCalc::Iterable liveTmps)
+    {
+        // All the Def()s interfere with everything live.
+        Inst::forEachDefWithExtraClobberedRegs(
+            prevInst, nextInst,
+            [&] (const Tmp& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+                if (argType != type)
+                    return;
+                
+                for (const Tmp& liveTmp : liveTmps) {
+                    ASSERT(liveTmp.isGP() == (type == Arg::GP));
+                    
+                    if (traceDebug)
+                        dataLog("    Adding def-live edge: ", arg, ", ", liveTmp, "\n");
+                    
+                    addEdge(arg, liveTmp);
+                }
+
+                if (type == Arg::GP && !arg.isGPR())
+                    m_interferesWithFramePointer.quickSet(AbsoluteTmpMapper::absoluteIndex(arg));
+            });
+    }
+
+    void addEdge(Tmp a, Tmp b)
+    {
+        ASSERT_WITH_MESSAGE(a.isGP() == b.isGP(), "An interference between registers of different types does not make sense, it can lead to non-colorable graphs.");
+
+        addEdge(AbsoluteTmpMapper::absoluteIndex(a), AbsoluteTmpMapper::absoluteIndex(b));
+    }
+
+    // Calling this without a tmpWidth will perform a more conservative coalescing analysis that assumes
+    // that Move32's are not coalescable.
+    static bool mayBeCoalescableImpl(const Inst& inst, TmpWidth* tmpWidth)
+    {
+        switch (type) {
+        case Arg::GP:
+            switch (inst.kind.opcode) {
+            case Move:
+            case Move32:
+                break;
+            default:
+                return false;
+            }
+            break;
+        case Arg::FP:
+            switch (inst.kind.opcode) {
+            case MoveFloat:
+            case MoveDouble:
+                break;
+            default:
+                return false;
+            }
+            break;
+        }
+
+        ASSERT_WITH_MESSAGE(inst.args.size() == 2, "We assume coalecable moves only have two arguments in a few places.");
+
+        if (!inst.args[0].isTmp() || !inst.args[1].isTmp())
+            return false;
+
+        ASSERT(inst.args[0].type() == type);
+        ASSERT(inst.args[1].type() == type);
+
+        // We can coalesce a Move32 so long as either of the following holds:
+        // - The input is already zero-filled.
+        // - The output only cares about the low 32 bits.
+        //
+        // Note that the input property requires an analysis over ZDef's, so it's only valid so long
+        // as the input gets a register. We don't know if the input gets a register, but we do know
+        // that if it doesn't get a register then we will still emit this Move32.
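+        // Concretely: if TmpWidth proves the source was only ever defined with 32-bit (ZDef)
+        // operations, or the destination is only ever read as 32 bits, then Move32 and Move are
+        // interchangeable here and the pair may be coalesced like an ordinary Move.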
+        if (inst.kind.opcode == Move32) {
+            if (!tmpWidth)
+                return false;
+
+            if (tmpWidth->defWidth(inst.args[0].tmp()) > Arg::Width32
+                && tmpWidth->useWidth(inst.args[1].tmp()) > Arg::Width32)
+                return false;
+        }
+        
+        return true;
+    }
+
+    void selectSpill()
+    {
+        if (!m_hasSelectedSpill) {
+            m_hasSelectedSpill = true;
+
+            if (m_hasCoalescedNonTrivialMove)
+                m_coalescedTmpsAtSpill = m_coalescedTmps;
+        }
+
+        auto iterator = m_spillWorklist.begin();
+
+        RELEASE_ASSERT_WITH_MESSAGE(iterator != m_spillWorklist.end(), "selectSpill() called when there was no spill.");
+        RELEASE_ASSERT_WITH_MESSAGE(!m_unspillableTmps.contains(*iterator), "trying to spill unspillable tmp");
+
+        // Higher score means more desirable to spill. Lower scores maximize the likelihood that a tmp
+        // gets a register.
+        auto score = [&] (Tmp tmp) -> double {
+            // Air exposes the concept of "fast tmps", and we interpret that to mean that the tmp
+            // should always be in a register.
+            if (m_code.isFastTmp(tmp))
+                return 0;
+            
+            // All else being equal, the score should be directly related to the degree.
+            double degree = static_cast<double>(m_degrees[AbsoluteTmpMapper<type>::absoluteIndex(tmp)]);
+
+            // All else being equal, the score should be inversely related to the number of warm uses and
+            // defs.
+            const UseCounts<Tmp>::Counts* counts = m_useCounts[tmp];
+            if (!counts)
+                return std::numeric_limits<double>::infinity();
+            
+            double uses = counts->numWarmUses + counts->numDefs;
+
+            // If it's a constant, then it's not as bad to spill. We can rematerialize it in many
+            // cases.
+            if (counts->numConstDefs == 1 && counts->numDefs == 1)
+                uses /= 2;
+
+            return degree / uses;
+        };
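+        // For example, a tmp with degree 30 and three warm uses scores 10, while a tmp with
+        // degree 30 and thirty uses scores 1 and is much more likely to keep its register. A tmp
+        // defined once by a constant has its use count halved, biasing us toward spilling values
+        // that can be cheaply rematerialized.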
+
+        auto victimIterator = iterator;
+        double maxScore = score(AbsoluteTmpMapper::tmpFromAbsoluteIndex(*iterator));
+
+        ++iterator;
+        for (;iterator != m_spillWorklist.end(); ++iterator) {
+            double tmpScore = score(AbsoluteTmpMapper::tmpFromAbsoluteIndex(*iterator));
+            if (tmpScore > maxScore) {
+                ASSERT(!m_unspillableTmps.contains(*iterator));
+                victimIterator = iterator;
+                maxScore = tmpScore;
+            }
+        }
+
+        unsigned victimIndex = *victimIterator;
+        m_spillWorklist.remove(victimIterator);
+        m_simplifyWorklist.append(victimIndex);
+
+        freezeMoves(victimIndex);
+    }
+
+    void allocate()
+    {
+        ASSERT_WITH_MESSAGE(m_activeMoves.size() >= m_coalescingCandidates.size(), "The activeMove set should be big enough for the quick operations of BitVector.");
+
+        makeWorkList();
+
+        if (debug) {
+            dataLog("Interference: ", listDump(m_interferenceEdges), "\n");
+            dumpInterferenceGraphInDot(WTF::dataFile());
+            dataLog("Coalescing candidates:\n");
+            for (MoveOperands& moveOp : m_coalescingCandidates) {
+                dataLog("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(moveOp.srcIndex),
+                    " -> ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(moveOp.dstIndex), "\n");
+            }
+            dataLog("Initial work list\n");
+            dumpWorkLists(WTF::dataFile());
+        }
+
+        do {
+            if (traceDebug) {
+                dataLog("Before Graph simplification iteration\n");
+                dumpWorkLists(WTF::dataFile());
+            }
+
+            if (!m_simplifyWorklist.isEmpty())
+                simplify();
+            else if (!m_worklistMoves.isEmpty())
+                coalesce();
+            else if (!m_freezeWorklist.isEmpty())
+                freeze();
+            else if (!m_spillWorklist.isEmpty())
+                selectSpill();
+
+            if (traceDebug) {
+                dataLog("After Graph simplification iteration\n");
+                dumpWorkLists(WTF::dataFile());
+            }
+        } while (!m_simplifyWorklist.isEmpty() || !m_worklistMoves.isEmpty() || !m_freezeWorklist.isEmpty() || !m_spillWorklist.isEmpty());
+
+        assignColors();
+    }
+
+#if PLATFORM(COCOA)
+#pragma mark - Debugging helpers.
+#endif
+
+    void dumpInterferenceGraphInDot(PrintStream& out)
+    {
+        out.print("graph InterferenceGraph { \n");
+
+        HashSet tmpsWithInterferences;
+        for (const auto& edge : m_interferenceEdges) {
+            tmpsWithInterferences.add(AbsoluteTmpMapper::tmpFromAbsoluteIndex(edge.first()));
+            tmpsWithInterferences.add(AbsoluteTmpMapper::tmpFromAbsoluteIndex(edge.second()));
+        }
+
+        for (const auto& tmp : tmpsWithInterferences) {
+            unsigned tmpIndex = AbsoluteTmpMapper::absoluteIndex(tmp);
+            if (tmpIndex < m_degrees.size())
+                out.print("    ", tmp.internalValue(), " [label=\"", tmp, " (", m_degrees[tmpIndex], ")\"];\n");
+            else
+                out.print("    ", tmp.internalValue(), " [label=\"", tmp, "\"];\n");
+        }
+
+        for (const auto& edge : m_interferenceEdges)
+            out.print("    ", edge.first(), " -- ", edge.second(), ";\n");
+        out.print("}\n");
+    }
+
+    void dumpWorkLists(PrintStream& out)
+    {
+        out.print("Simplify work list:\n");
+        for (unsigned tmpIndex : m_simplifyWorklist)
+            out.print("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(tmpIndex), "\n");
+        out.printf("Moves work list is empty? %d\n", m_worklistMoves.isEmpty());
+        out.print("Freeze work list:\n");
+        for (unsigned tmpIndex : m_freezeWorklist)
+            out.print("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(tmpIndex), "\n");
+        out.print("Spill work list:\n");
+        for (unsigned tmpIndex : m_spillWorklist)
+            out.print("    ", AbsoluteTmpMapper::tmpFromAbsoluteIndex(tmpIndex), "\n");
+    }
+
+    using AbstractColoringAllocator::addEdge;
+    using AbstractColoringAllocator::getAlias;
+
+    Code& m_code;
+    TmpWidth& m_tmpWidth;
+    // FIXME: spilling should not be type specific. It is only a side effect of using UseCounts.
+    const UseCounts<Tmp>& m_useCounts;
+};
+
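+// Driver for the allocator: run the coloring allocator once per bank (GP, then FP). Whenever it
+// fails to color everything, spill the chosen tmps to stack slots, rewrite the code with loads
+// and stores around their uses, and iterate until an allocation with no spills is found.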
+class IteratedRegisterCoalescing {
+public:
+    IteratedRegisterCoalescing(Code& code)
+        : m_code(code)
+        , m_useCounts(code)
+    {
+    }
+
+    void run()
+    {
+        padInterference(m_code);
+        
+        iteratedRegisterCoalescingOnType<Arg::GP>();
+        iteratedRegisterCoalescingOnType<Arg::FP>();
+
+        fixSpillsAfterTerminals();
+
+        if (reportStats)
+            dataLog("Num iterations = ", m_numIterations, "\n");
+    }
+
+private:
+    template<Arg::Type type>
+    void iteratedRegisterCoalescingOnType()
+    {
+        HashSet<unsigned> unspillableTmps = computeUnspillableTmps<type>();
+
+        // FIXME: If a Tmp is used only from a Scratch role and that argument is !admitsStack, then
+        // we should add the Tmp to unspillableTmps. That will help avoid relooping only to turn the
+        // Tmp into an unspillable Tmp.
+        // https://bugs.webkit.org/show_bug.cgi?id=152699
+        
+        while (true) {
+            ++m_numIterations;
+
+            if (traceDebug)
+                dataLog("Code at iteration ", m_numIterations, ":\n", m_code);
+
+            // FIXME: One way to optimize this code is to remove the recomputation inside the fixpoint.
+            // We need to recompute because spilling adds tmps, but we could just update tmpWidth when we
+            // add those tmps. Note that one easy way to remove the recomputation is to make any newly
+            // added Tmps get the same use/def widths that the original Tmp got. But, this may hurt the
+            // spill code we emit. Since we currently recompute TmpWidth after spilling, the newly
+            // created Tmps may get narrower use/def widths. On the other hand, the spiller already
+            // selects which move instruction to use based on the original Tmp's widths, so it may not
+            // matter that a subsequent iteration sees a conservative width for the new Tmps. Also, the
+            // recomputation may not actually be a performance problem; it's likely that a better way to
+            // improve performance of TmpWidth is to replace its HashMap with something else. It's
+            // possible that most of the TmpWidth overhead is from queries of TmpWidth rather than the
+            // recomputation, in which case speeding up the lookup would be a bigger win.
+            // https://bugs.webkit.org/show_bug.cgi?id=152478
+            m_tmpWidth.recompute(m_code);
+            
+            ColoringAllocator<type> allocator(m_code, m_tmpWidth, m_useCounts, unspillableTmps);
+            if (!allocator.requiresSpilling()) {
+                assignRegistersToTmp(allocator);
+                if (traceDebug)
+                    dataLog("Successfull allocation at iteration ", m_numIterations, ":\n", m_code);
+
+                return;
+            }
+            addSpillAndFill(allocator, unspillableTmps);
+        }
+    }
+
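+    // A tmp whose entire live range spans at most two adjacent instructions, and which has at
+    // least one use that does not admit a stack address, gains nothing from being spilled: the
+    // reload would just recreate an equally short-lived tmp. Marking such tmps unspillable up
+    // front keeps the spill/retry fixpoint from looping on them.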
+    template<Arg::Type type>
+    HashSet<unsigned> computeUnspillableTmps()
+    {
+        HashSet<unsigned> unspillableTmps;
+
+        struct Range {
+            unsigned first { std::numeric_limits<unsigned>::max() };
+            unsigned last { 0 };
+            unsigned count { 0 };
+            unsigned admitStackCount { 0 };
+        };
+
+        unsigned numTmps = m_code.numTmps(type);
+        unsigned arraySize = AbsoluteTmpMapper::absoluteIndex(numTmps);
+
+        Vector<Range> ranges;
+        ranges.fill(Range(), arraySize);
+
+        unsigned globalIndex = 0;
+        for (BasicBlock* block : m_code) {
+            for (Inst& inst : *block) {
+                inst.forEachArg([&] (Arg& arg, Arg::Role, Arg::Type argType, Arg::Width) {
+                    if (arg.isTmp() && inst.admitsStack(arg)) {
+                        if (argType != type)
+                            return;
+
+                        Tmp tmp = arg.tmp();
+                        Range& range = ranges[AbsoluteTmpMapper::absoluteIndex(tmp)];
+                        range.count++;
+                        range.admitStackCount++;
+                        if (globalIndex < range.first) {
+                            range.first = globalIndex;
+                            range.last = globalIndex;
+                        } else
+                            range.last = globalIndex;
+
+                        return;
+                    }
+
+                    arg.forEachTmpFast([&] (Tmp& tmp) {
+                        if (tmp.isGP() != (type == Arg::GP))
+                            return;
+
+                        Range& range = ranges[AbsoluteTmpMapper::absoluteIndex(tmp)];
+                        range.count++;
+                        if (globalIndex < range.first) {
+                            range.first = globalIndex;
+                            range.last = globalIndex;
+                        } else
+                            range.last = globalIndex;
+                    });
+                });
+
+                ++globalIndex;
+            }
+            ++globalIndex;
+        }
+        for (unsigned i = AbsoluteTmpMapper::lastMachineRegisterIndex() + 1; i < ranges.size(); ++i) {
+            Range& range = ranges[i];
+            if (range.last - range.first <= 1 && range.count > range.admitStackCount)
+                unspillableTmps.add(i);
+        }
+
+        return unspillableTmps;
+    }
+
+    template<Arg::Type type>
+    void assignRegistersToTmp(const ColoringAllocator<type>& allocator)
+    {
+        for (BasicBlock* block : m_code) {
+            // Give Tmp a valid register.
+            for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+                Inst& inst = block->at(instIndex);
+
+                // The mayBeCoalescable() method will change its mind for some operations after we
+                // complete register allocation. So, we record this before starting.
+                bool mayBeCoalescable = allocator.mayBeCoalescable(inst);
+
+                // Move32 is cheaper if we know that it's equivalent to a Move. It's
+                // equivalent if the destination's high bits are not observable or if the source's high
+                // bits are all zero. Note that we don't have the opposite optimization for other
+                // architectures, which may prefer Move over Move32, because Move is canonical already.
+                if (type == Arg::GP && inst.kind.opcode == Move
+                    && inst.args[0].isTmp() && inst.args[1].isTmp()) {
+                    if (m_tmpWidth.useWidth(inst.args[1].tmp()) <= Arg::Width32
+                        || m_tmpWidth.defWidth(inst.args[0].tmp()) <= Arg::Width32)
+                        inst.kind.opcode = Move32;
+                }
+
+                inst.forEachTmpFast([&] (Tmp& tmp) {
+                    if (tmp.isReg() || tmp.isGP() == (type != Arg::GP))
+                        return;
+
+                    Tmp aliasTmp = allocator.getAlias(tmp);
+                    Tmp assignedTmp;
+                    if (aliasTmp.isReg())
+                        assignedTmp = Tmp(aliasTmp.reg());
+                    else {
+                        auto reg = allocator.allocatedReg(aliasTmp);
+                        ASSERT(reg);
+                        assignedTmp = Tmp(reg);
+                    }
+                    ASSERT(assignedTmp.isReg());
+                    tmp = assignedTmp;
+                });
+
+                if (mayBeCoalescable && inst.args[0].isTmp() && inst.args[1].isTmp()
+                    && inst.args[0].tmp() == inst.args[1].tmp())
+                    inst = Inst();
+            }
+
+            // Remove all the useless moves we created in this block.
+            block->insts().removeAllMatching([&] (const Inst& inst) {
+                return !inst;
+            });
+        }
+    }
+
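+    // Spill slots are always at least 32 bits wide: tmps narrower than 32 bits still get a 4-byte
+    // slot, and anything wider gets an 8-byte slot.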
+    static unsigned stackSlotMinimumWidth(Arg::Width width)
+    {
+        return width <= Arg::Width32 ? 4 : 8;
+    }
+
+    template<Arg::Type type>
+    void addSpillAndFill(const ColoringAllocator<type>& allocator, HashSet<unsigned>& unspillableTmps)
+    {
+        HashMap<Tmp, StackSlot*> stackSlots;
+        for (Tmp tmp : allocator.spilledTmps()) {
+            // All the spilled values become unspillable.
+            unspillableTmps.add(AbsoluteTmpMapper<type>::absoluteIndex(tmp));
+
+            // Allocate a stack slot for each spilled value.
+            StackSlot* stackSlot = m_code.addStackSlot(
+                stackSlotMinimumWidth(m_tmpWidth.requiredWidth(tmp)), StackSlotKind::Spill);
+            bool isNewTmp = stackSlots.add(tmp, stackSlot).isNewEntry;
+            ASSERT_UNUSED(isNewTmp, isNewTmp);
+        }
+
+        // Rewrite the program to get rid of the spilled Tmp.
+        InsertionSet insertionSet(m_code);
+        for (BasicBlock* block : m_code) {
+            bool hasAliasedTmps = false;
+
+            for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+                Inst& inst = block->at(instIndex);
+
+                // The TmpWidth analysis will say that a Move only stores 32 bits into the destination,
+                // if the source only had 32 bits worth of non-zero bits. Same for the source: it will
+                // only claim to read 32 bits from the source if only 32 bits of the destination are
+                // read. Note that we only apply this logic if this turns into a load or store, since
+                // Move is the canonical way to move data between GPRs.
+                bool canUseMove32IfDidSpill = false;
+                bool didSpill = false;
+                if (type == Arg::GP && inst.kind.opcode == Move) {
+                    if ((inst.args[0].isTmp() && m_tmpWidth.width(inst.args[0].tmp()) <= Arg::Width32)
+                        || (inst.args[1].isTmp() && m_tmpWidth.width(inst.args[1].tmp()) <= Arg::Width32))
+                        canUseMove32IfDidSpill = true;
+                }
+
+                // Try to replace the register use by memory use when possible.
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role role, Arg::Type argType, Arg::Width width) {
+                        if (!arg.isTmp())
+                            return;
+                        if (argType != type)
+                            return;
+                        if (arg.isReg())
+                            return;
+                        
+                        auto stackSlotEntry = stackSlots.find(arg.tmp());
+                        if (stackSlotEntry == stackSlots.end())
+                            return;
+                        if (!inst.admitsStack(arg))
+                            return;
+                        
+                        // If the Tmp holds a constant then we want to rematerialize its
+                        // value rather than loading it from the stack. In order for that
+                        // optimization to kick in, we need to avoid placing the Tmp's stack
+                        // address into the instruction.
+                        if (!Arg::isColdUse(role)) {
+                            const UseCounts::Counts* counts = m_useCounts[arg.tmp()];
+                            if (counts && counts->numConstDefs == 1 && counts->numDefs == 1)
+                                return;
+                        }
+                        
+                        Arg::Width spillWidth = m_tmpWidth.requiredWidth(arg.tmp());
+                        if (Arg::isAnyDef(role) && width < spillWidth)
+                            return;
+                        ASSERT(inst.kind.opcode == Move || !(Arg::isAnyUse(role) && width > spillWidth));
+                        
+                        if (spillWidth != Arg::Width32)
+                            canUseMove32IfDidSpill = false;
+                        
+                        stackSlotEntry->value->ensureSize(
+                            canUseMove32IfDidSpill ? 4 : Arg::bytes(width));
+                        arg = Arg::stack(stackSlotEntry->value);
+                        didSpill = true;
+                    });
+
+                if (didSpill && canUseMove32IfDidSpill)
+                    inst.kind.opcode = Move32;
+
+                // For every other case, add Load/Store as needed.
+                inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Arg::Type argType, Arg::Width) {
+                    if (tmp.isReg() || argType != type)
+                        return;
+
+                    auto stackSlotEntry = stackSlots.find(tmp);
+                    if (stackSlotEntry == stackSlots.end()) {
+                        Tmp alias = allocator.getAliasWhenSpilling(tmp);
+                        if (alias != tmp) {
+                            tmp = alias;
+                            hasAliasedTmps = true;
+                        }
+                        return;
+                    }
+
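+                    // Pick the move whose width matches the spill slot, so loads and stores touch
+                    // exactly the bytes the slot owns.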
+                    Arg::Width spillWidth = m_tmpWidth.requiredWidth(tmp);
+                    Opcode move = Oops;
+                    switch (stackSlotMinimumWidth(spillWidth)) {
+                    case 4:
+                        move = type == Arg::GP ? Move32 : MoveFloat;
+                        break;
+                    case 8:
+                        move = type == Arg::GP ? Move : MoveDouble;
+                        break;
+                    default:
+                        RELEASE_ASSERT_NOT_REACHED();
+                        break;
+                    }
+
+                    tmp = m_code.newTmp(type);
+                    unspillableTmps.add(AbsoluteTmpMapper<type>::absoluteIndex(tmp));
+
+                    Arg arg = Arg::stack(stackSlotEntry->value);
+                    if (Arg::isAnyUse(role) && role != Arg::Scratch)
+                        insertionSet.insert(instIndex, move, inst.origin, arg, tmp);
+                    if (Arg::isAnyDef(role))
+                        insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+                });
+            }
+            insertionSet.execute(block);
+
+            if (hasAliasedTmps) {
+                block->insts().removeAllMatching([&] (const Inst& inst) {
+                    return allocator.isUselessMove(inst);
+                });
+            }
+        }
+    }
+
+    void fixSpillsAfterTerminals()
+    {
+        // Because there may be terminals that produce values, IRC may
+        // want to spill those values. The spill then ends up being
+        // inserted after the terminal. If we left the graph in this
+        // state, it'd be invalid because a terminal must be the last
+        // instruction in a block. We fix that here.
+
+        InsertionSet insertionSet(m_code);
+
+        bool addedBlocks = false;
+
+        for (BasicBlock* block : m_code) {
+            unsigned terminalIndex = block->size();
+            bool foundTerminal = false;
+            while (terminalIndex--) {
+                if (block->at(terminalIndex).isTerminal()) {
+                    foundTerminal = true;
+                    break;
+                }
+            }
+            ASSERT_UNUSED(foundTerminal, foundTerminal);
+
+            if (terminalIndex == block->size() - 1)
+                continue;
+
+            // There must be instructions after the terminal because it's not the last instruction.
+            ASSERT(terminalIndex < block->size() - 1);
+            Vector<Inst> instsToMove;
+            for (unsigned i = terminalIndex + 1; i < block->size(); i++)
+                instsToMove.append(block->at(i));
+            RELEASE_ASSERT(instsToMove.size());
+
+            for (FrequentedBlock& frequentedSuccessor : block->successors()) {
+                BasicBlock* successor = frequentedSuccessor.block();
+                // If successor's only predecessor is block, we can plant the spill inside
+                // the successor. Otherwise, we must split the critical edge and create
+                // a new block for the spill.
+                if (successor->numPredecessors() == 1) {
+                    insertionSet.insertInsts(0, instsToMove);
+                    insertionSet.execute(successor);
+                } else {
+                    addedBlocks = true;
+                    // FIXME: We probably want better block ordering here.
+                    BasicBlock* newBlock = m_code.addBlock();
+                    for (const Inst& inst : instsToMove)
+                        newBlock->appendInst(inst);
+                    newBlock->appendInst(Inst(Jump, instsToMove.last().origin));
+                    newBlock->successors().append(successor);
+                    frequentedSuccessor.block() = newBlock;
+                }
+            }
+
+            block->resize(terminalIndex + 1);
+        }
+
+        if (addedBlocks)
+            m_code.resetReachability();
+    }
+
+    Code& m_code;
+    TmpWidth m_tmpWidth;
+    UseCounts m_useCounts;
+    unsigned m_numIterations { 0 };
+};
+
+} // anonymous namespace
+
+void iteratedRegisterCoalescing(Code& code)
+{
+    PhaseScope phaseScope(code, "iteratedRegisterCoalescing");
+    
+    IteratedRegisterCoalescing iteratedRegisterCoalescing(code);
+    iteratedRegisterCoalescing.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirIteratedRegisterCoalescing.h b/b3/air/AirIteratedRegisterCoalescing.h
new file mode 100644
index 0000000..ab689b3
--- /dev/null
+++ b/b3/air/AirIteratedRegisterCoalescing.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a register allocation phase based on Andrew Appel's Iterated Register Coalescing
+// http://www.cs.cmu.edu/afs/cs/academic/class/15745-s07/www/papers/george.pdf
+void iteratedRegisterCoalescing(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirKind.cpp b/b3/air/AirKind.cpp
new file mode 100644
index 0000000..9fe2525
--- /dev/null
+++ b/b3/air/AirKind.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirKind.h"
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/CommaPrinter.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+void Kind::dump(PrintStream& out) const
+{
+    out.print(opcode);
+    
+    CommaPrinter comma(", ", "<");
+    if (traps)
+        out.print(comma, "Traps");
+    if (comma.didPrint())
+        out.print(">");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirKind.h b/b3/air/AirKind.h
new file mode 100644
index 0000000..e723d46
--- /dev/null
+++ b/b3/air/AirKind.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef AirKind_h
+#define AirKind_h
+
+#if ENABLE(B3_JIT)
+
+#include "AirOpcode.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+// Air opcodes are always carried around with some flags. These flags are understood as having no
+// meaning if they are set for an opcode to which they do not apply. This makes sense, since Air
+// is a complex instruction set and most of these flags can apply to basically any opcode. In
+// fact, it's recommended to only represent something as a flag if you believe that it is largely
+// opcode-agnostic.
+
+struct Kind {
+    Kind(Opcode opcode)
+        : opcode(opcode)
+        , traps(false)
+    {
+    }
+    
+    Kind()
+        : Kind(Nop)
+    {
+    }
+    
+    bool operator==(const Kind& other) const
+    {
+        return opcode == other.opcode
+            && traps == other.traps;
+    }
+    
+    bool operator!=(const Kind& other) const
+    {
+        return !(*this == other);
+    }
+    
+    unsigned hash() const
+    {
+        return static_cast<unsigned>(opcode) + (static_cast<unsigned>(traps) << 16);
+    }
+    
+    explicit operator bool() const
+    {
+        return *this != Kind();
+    }
+    
+    void dump(PrintStream&) const;
+    
+    Opcode opcode;
+    
+    // This is an opcode-agnostic flag that indicates that we expect that this instruction will
+    // trap. This causes the compiler to assume that this side-exits and therefore has non-control
+    // non-arg effects. This also causes the compiler to tell you about all of these instructions.
+    // Note that this is just one of several ways of supporting trapping in Air, and it's the less
+    // precise variant because it's origin-based. This means that if an instruction was fused out
+    // of B3 values that had different origins, then the origin at which you'll appear to trap
+    // will be somewhat random. The upside of this approach is that it imposes by far the least
+    // overhead on the compiler.
+    // FIXME: Make this completely work.
+    // https://bugs.webkit.org/show_bug.cgi?id=162689
+    bool traps : 1;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+#endif // AirKind_h
+
diff --git a/b3/air/AirLiveness.h b/b3/air/AirLiveness.h
new file mode 100644
index 0000000..e727c36
--- /dev/null
+++ b/b3/air/AirLiveness.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirBasicBlock.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirStackSlot.h"
+#include "AirTmpInlines.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+template<Arg::Type adapterType>
+struct TmpLivenessAdapter {
+    typedef Tmp Thing;
+    typedef HashSet<unsigned> IndexSet;
+
+    TmpLivenessAdapter(Code&) { }
+
+    static unsigned numIndices(Code& code)
+    {
+        unsigned numTmps = code.numTmps(adapterType);
+        return AbsoluteTmpMapper<adapterType>::absoluteIndex(numTmps);
+    }
+    static bool acceptsType(Arg::Type type) { return type == adapterType; }
+    static unsigned valueToIndex(Tmp tmp) { return AbsoluteTmpMapper<adapterType>::absoluteIndex(tmp); }
+    static Tmp indexToValue(unsigned index) { return AbsoluteTmpMapper<adapterType>::tmpFromAbsoluteIndex(index); }
+};
+
+struct StackSlotLivenessAdapter {
+    typedef StackSlot* Thing;
+    typedef HashSet<unsigned, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> IndexSet;
+
+    StackSlotLivenessAdapter(Code& code)
+        : m_code(code)
+    {
+    }
+
+    static unsigned numIndices(Code& code)
+    {
+        return code.stackSlots().size();
+    }
+    static bool acceptsType(Arg::Type) { return true; }
+    static unsigned valueToIndex(StackSlot* stackSlot) { return stackSlot->index(); }
+    StackSlot* indexToValue(unsigned index) { return m_code.stackSlots()[index]; }
+
+private:
+    Code& m_code;
+};
+
+struct RegLivenessAdapter {
+    typedef Reg Thing;
+    typedef BitVector IndexSet;
+
+    RegLivenessAdapter(Code&) { }
+
+    static unsigned numIndices(Code&)
+    {
+        return Reg::maxIndex() + 1;
+    }
+
+    static bool acceptsType(Arg::Type) { return true; }
+    static unsigned valueToIndex(Reg reg) { return reg.index(); }
+    Reg indexToValue(unsigned index) { return Reg::fromIndex(index); }
+};
+
+template<typename Adapter>
+class AbstractLiveness : public Adapter {
+    struct Workset;
+public:
+    typedef typename Adapter::Thing Thing;
+    
+    AbstractLiveness(Code& code)
+        : Adapter(code)
+        , m_workset(Adapter::numIndices(code))
+        , m_liveAtHead(code.size())
+        , m_liveAtTail(code.size())
+    {
+        // The liveAtTail of each block automatically contains the LateUse's of the terminal.
+        for (BasicBlock* block : code) {
+            typename Adapter::IndexSet& liveAtTail = m_liveAtTail[block];
+
+            block->last().forEach(
+                [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (Arg::isLateUse(role) && Adapter::acceptsType(type))
+                        liveAtTail.add(Adapter::valueToIndex(thing));
+                });
+        }
+
+        // Blocks with new live values at tail.
+        BitVector dirtyBlocks;
+        for (size_t blockIndex = 0; blockIndex < code.size(); ++blockIndex)
+            dirtyBlocks.set(blockIndex);
+
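+        // Backwards dataflow to a fixpoint: recompute liveness at the head of every dirty block and
+        // merge anything new into the liveAtTail of its predecessors, re-dirtying them until no set grows.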
+        bool changed;
+        do {
+            changed = false;
+
+            for (size_t blockIndex = code.size(); blockIndex--;) {
+                BasicBlock* block = code.at(blockIndex);
+                if (!block)
+                    continue;
+
+                if (!dirtyBlocks.quickClear(blockIndex))
+                    continue;
+
+                LocalCalc localCalc(*this, block);
+                for (size_t instIndex = block->size(); instIndex--;)
+                    localCalc.execute(instIndex);
+
+                // Handle the early def's of the first instruction.
+                block->at(0).forEach(
+                    [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                        if (Arg::isEarlyDef(role) && Adapter::acceptsType(type))
+                            m_workset.remove(Adapter::valueToIndex(thing));
+                    });
+
+                Vector<unsigned>& liveAtHead = m_liveAtHead[block];
+
+                // We only care about Tmps that were discovered in this iteration. It is impossible
+                // to remove a live value from the head.
+                // We remove all the values we already knew about so that we only have to deal with
+                // what is new in LiveAtHead.
+                if (m_workset.size() == liveAtHead.size())
+                    m_workset.clear();
+                else {
+                    for (unsigned liveIndexAtHead : liveAtHead)
+                        m_workset.remove(liveIndexAtHead);
+                }
+
+                if (m_workset.isEmpty())
+                    continue;
+
+                liveAtHead.reserveCapacity(liveAtHead.size() + m_workset.size());
+                for (unsigned newValue : m_workset)
+                    liveAtHead.uncheckedAppend(newValue);
+
+                for (BasicBlock* predecessor : block->predecessors()) {
+                    typename Adapter::IndexSet& liveAtTail = m_liveAtTail[predecessor];
+                    for (unsigned newValue : m_workset) {
+                        if (liveAtTail.add(newValue)) {
+                            if (!dirtyBlocks.quickSet(predecessor->index()))
+                                changed = true;
+                        }
+                    }
+                }
+            }
+        } while (changed);
+    }
+
+    // This calculator has to be run in reverse.
+    class LocalCalc {
+    public:
+        LocalCalc(AbstractLiveness& liveness, BasicBlock* block)
+            : m_liveness(liveness)
+            , m_block(block)
+        {
+            auto& workset = liveness.m_workset;
+            workset.clear();
+            typename Adapter::IndexSet& liveAtTail = liveness.m_liveAtTail[block];
+            for (unsigned index : liveAtTail)
+                workset.add(index);
+        }
+
+        struct Iterator {
+            Iterator(Adapter& adapter, IndexSparseSet::const_iterator sparceSetIterator)
+                : m_adapter(adapter)
+                , m_sparceSetIterator(sparceSetIterator)
+            {
+            }
+
+            Iterator& operator++()
+            {
+                ++m_sparceSetIterator;
+                return *this;
+            }
+
+            typename Adapter::Thing operator*() const
+            {
+                return m_adapter.indexToValue(*m_sparceSetIterator);
+            }
+
+            bool operator==(const Iterator& other) { return m_sparceSetIterator == other.m_sparceSetIterator; }
+            bool operator!=(const Iterator& other) { return m_sparceSetIterator != other.m_sparceSetIterator; }
+
+        private:
+            Adapter& m_adapter;
+            IndexSparseSet::const_iterator m_sparceSetIterator;
+        };
+
+        struct Iterable {
+            Iterable(AbstractLiveness& liveness)
+                : m_liveness(liveness)
+            {
+            }
+
+            Iterator begin() const { return Iterator(m_liveness, m_liveness.m_workset.begin()); }
+            Iterator end() const { return Iterator(m_liveness, m_liveness.m_workset.end()); }
+            
+            bool contains(const typename Adapter::Thing& thing) const
+            {
+                return m_liveness.m_workset.contains(Adapter::valueToIndex(thing));
+            }
+
+        private:
+            AbstractLiveness& m_liveness;
+        };
+
+        Iterable live() const
+        {
+            return Iterable(m_liveness);
+        }
+
+        bool isLive(const typename Adapter::Thing& thing) const
+        {
+            return live().contains(thing);
+        }
+
+        void execute(unsigned instIndex)
+        {
+            Inst& inst = m_block->at(instIndex);
+            auto& workset = m_liveness.m_workset;
+
+            // First handle the early def's of the next instruction.
+            if (instIndex + 1 < m_block->size()) {
+                Inst& nextInst = m_block->at(instIndex + 1);
+                nextInst.forEach(
+                    [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                        if (Arg::isEarlyDef(role) && Adapter::acceptsType(type))
+                            workset.remove(Adapter::valueToIndex(thing));
+                    });
+            }
+            
+            // Then handle def's.
+            inst.forEach(
+                [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (Arg::isLateDef(role) && Adapter::acceptsType(type))
+                        workset.remove(Adapter::valueToIndex(thing));
+                });
+
+            // Then handle use's.
+            inst.forEach(
+                [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (Arg::isEarlyUse(role) && Adapter::acceptsType(type))
+                        workset.add(Adapter::valueToIndex(thing));
+                });
+
+            // And finally, handle the late use's of the previous instruction.
+            if (instIndex) {
+                Inst& prevInst = m_block->at(instIndex - 1);
+                prevInst.forEach(
+                    [&] (typename Adapter::Thing& thing, Arg::Role role, Arg::Type type, Arg::Width) {
+                        if (Arg::isLateUse(role) && Adapter::acceptsType(type))
+                            workset.add(Adapter::valueToIndex(thing));
+                    });
+            }
+        }
+
+    private:
+        AbstractLiveness& m_liveness;
+        BasicBlock* m_block;
+    };
+
+    const Vector<unsigned>& rawLiveAtHead(BasicBlock* block)
+    {
+        return m_liveAtHead[block];
+    }
+
+    template<typename UnderlyingIterable>
+    class Iterable {
+    public:
+        Iterable(AbstractLiveness& liveness, const UnderlyingIterable& iterable)
+            : m_liveness(liveness)
+            , m_iterable(iterable)
+        {
+        }
+
+        class iterator {
+        public:
+            iterator()
+                : m_liveness(nullptr)
+                , m_iter()
+            {
+            }
+            
+            iterator(AbstractLiveness& liveness, typename UnderlyingIterable::const_iterator iter)
+                : m_liveness(&liveness)
+                , m_iter(iter)
+            {
+            }
+
+            typename Adapter::Thing operator*()
+            {
+                return m_liveness->indexToValue(*m_iter);
+            }
+
+            iterator& operator++()
+            {
+                ++m_iter;
+                return *this;
+            }
+
+            bool operator==(const iterator& other) const
+            {
+                ASSERT(m_liveness == other.m_liveness);
+                return m_iter == other.m_iter;
+            }
+
+            bool operator!=(const iterator& other) const
+            {
+                return !(*this == other);
+            }
+
+        private:
+            AbstractLiveness* m_liveness;
+            typename UnderlyingIterable::const_iterator m_iter;
+        };
+
+        iterator begin() const { return iterator(m_liveness, m_iterable.begin()); }
+        iterator end() const { return iterator(m_liveness, m_iterable.end()); }
+
+        bool contains(const typename Adapter::Thing& thing) const
+        {
+            return m_liveness.m_workset.contains(Adapter::valueToIndex(thing));
+        }
+
+    private:
+        AbstractLiveness& m_liveness;
+        const UnderlyingIterable& m_iterable;
+    };
+
+    Iterable<Vector<unsigned>> liveAtHead(BasicBlock* block)
+    {
+        return Iterable<Vector<unsigned>>(*this, m_liveAtHead[block]);
+    }
+
+    Iterable<typename Adapter::IndexSet> liveAtTail(BasicBlock* block)
+    {
+        return Iterable<typename Adapter::IndexSet>(*this, m_liveAtTail[block]);
+    }
+
+    IndexSparseSet& workset() { return m_workset; }
+
+private:
+    friend class LocalCalc;
+    friend struct LocalCalc::Iterable;
+
+    IndexSparseSet m_workset;
+    IndexMap> m_liveAtHead;
+    IndexMap m_liveAtTail;
+};
+
+template<Arg::Type adapterType>
+using TmpLiveness = AbstractLiveness<TmpLivenessAdapter<adapterType>>;
+
+typedef AbstractLiveness<TmpLivenessAdapter<Arg::GP>> GPLiveness;
+typedef AbstractLiveness<TmpLivenessAdapter<Arg::FP>> FPLiveness;
+typedef AbstractLiveness<StackSlotLivenessAdapter> StackSlotLiveness;
+typedef AbstractLiveness<RegLivenessAdapter> RegLiveness;
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirLogRegisterPressure.cpp b/b3/air/AirLogRegisterPressure.cpp
new file mode 100644
index 0000000..dbbb257
--- /dev/null
+++ b/b3/air/AirLogRegisterPressure.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirLogRegisterPressure.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void logRegisterPressure(Code& code)
+{
+    const unsigned totalColumns = 200;
+    const unsigned registerColumns = 100;
+    
+    RegLiveness liveness(code);
+
+    for (BasicBlock* block : code) {
+        RegLiveness::LocalCalc localCalc(liveness, block);
+
+        block->dumpHeader(WTF::dataFile());
+
+        Vector<CString> instDumps;
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            Inst& inst = block->at(instIndex);
+            Inst* prevInst = block->get(instIndex - 1);
+
+            localCalc.execute(instIndex);
+
+            RegisterSet set;
+            set.setAll(localCalc.live());
+            Inst::forEachDefWithExtraClobberedRegs<Reg>(
+                prevInst, &inst,
+                [&] (Reg reg, Arg::Role, Arg::Type, Arg::Width) {
+                    set.set(reg);
+                });
+
+            StringPrintStream instOut;
+            StringPrintStream lineOut;
+            lineOut.print("   ");
+            if (set.numberOfSetRegisters()) {
+                set.forEach(
+                    [&] (Reg reg) {
+                        CString text = toCString(" ", reg);
+                        if (text.length() + lineOut.length() > totalColumns) {
+                            instOut.print(lineOut.toCString(), "\n");
+                            lineOut.reset();
+                            lineOut.print("       ");
+                        }
+                        lineOut.print(text);
+                    });
+                lineOut.print(":");
+            }
+            if (lineOut.length() > registerColumns) {
+                instOut.print(lineOut.toCString(), "\n");
+                lineOut.reset();
+            }
+            while (lineOut.length() < registerColumns)
+                lineOut.print(" ");
+            lineOut.print(" ");
+            lineOut.print(inst);
+            instOut.print(lineOut.toCString(), "\n");
+            instDumps.append(instOut.toCString());
+        }
+
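+        // The instructions were visited backwards, so print the collected dumps in reverse to
+        // restore program order.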
+        for (unsigned i = instDumps.size(); i--;)
+            dataLog(instDumps[i]);
+        
+        block->dumpFooter(WTF::dataFile());
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirLogRegisterPressure.h b/b3/air/AirLogRegisterPressure.h
new file mode 100644
index 0000000..3f7c3e2
--- /dev/null
+++ b/b3/air/AirLogRegisterPressure.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Dumps the registers that are used at each instruction.
+void logRegisterPressure(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirLowerAfterRegAlloc.cpp b/b3/air/AirLowerAfterRegAlloc.cpp
new file mode 100644
index 0000000..e001873
--- /dev/null
+++ b/b3/air/AirLowerAfterRegAlloc.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirLowerAfterRegAlloc.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCCallingConvention.h"
+#include "AirCode.h"
+#include "AirEmitShuffle.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+#include "RegisterSet.h"
+#include <wtf/ListDump.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+bool verbose = false;
+    
+} // anonymous namespace
+
+void lowerAfterRegAlloc(Code& code)
+{
+    PhaseScope phaseScope(code, "lowerAfterRegAlloc");
+
+    if (verbose)
+        dataLog("Code before lowerAfterRegAlloc:\n", code);
+
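+    // For each Shuffle and ColdCCall, record the registers live after it; the lowering below uses
+    // this to pick scratch registers and to decide what to save around a cold call.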
+    HashMap<Inst*, RegisterSet> usedRegisters;
+
+    RegLiveness liveness(code);
+    for (BasicBlock* block : code) {
+        RegLiveness::LocalCalc localCalc(liveness, block);
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            Inst& inst = block->at(instIndex);
+            
+            RegisterSet set;
+
+            bool isRelevant = inst.kind.opcode == Shuffle || inst.kind.opcode == ColdCCall;
+            
+            if (isRelevant) {
+                for (Reg reg : localCalc.live())
+                    set.set(reg);
+            }
+            
+            localCalc.execute(instIndex);
+
+            if (isRelevant)
+                usedRegisters.add(&inst, set);
+        }
+    }
+
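+    // Find two scratch locations of the given type that are not in the given register set: prefer
+    // free registers in priority order, and fall back to fresh spill slots when none remain.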
+    auto getScratches = [&] (RegisterSet set, Arg::Type type) -> std::array<Arg, 2> {
+        std::array<Arg, 2> result;
+        for (unsigned i = 0; i < 2; ++i) {
+            bool found = false;
+            for (Reg reg : code.regsInPriorityOrder(type)) {
+                if (!set.get(reg)) {
+                    result[i] = Tmp(reg);
+                    set.set(reg);
+                    found = true;
+                    break;
+                }
+            }
+            if (!found) {
+                result[i] = Arg::stack(
+                    code.addStackSlot(
+                        Arg::bytes(Arg::conservativeWidth(type)),
+                        StackSlotKind::Spill));
+            }
+        }
+        return result;
+    };
+
+    // Now transform the code.
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+
+            switch (inst.kind.opcode) {
+            case Shuffle: {
+                RegisterSet set = usedRegisters.get(&inst);
+                Vector<ShufflePair> pairs;
+                for (unsigned i = 0; i < inst.args.size(); i += 3) {
+                    Arg src = inst.args[i + 0];
+                    Arg dst = inst.args[i + 1];
+                    Arg::Width width = inst.args[i + 2].width();
+
+                    // The used register set contains things live after the shuffle. But
+                    // emitShuffle() wants a scratch register that is not just dead but also does not
+                    // interfere with either sources or destinations.
+                    auto excludeRegisters = [&] (Tmp tmp) {
+                        if (tmp.isReg())
+                            set.set(tmp.reg());
+                    };
+                    src.forEachTmpFast(excludeRegisters);
+                    dst.forEachTmpFast(excludeRegisters);
+                    
+                    pairs.append(ShufflePair(src, dst, width));
+                }
+                std::array<Arg, 2> gpScratch = getScratches(set, Arg::GP);
+                std::array<Arg, 2> fpScratch = getScratches(set, Arg::FP);
+                insertionSet.insertInsts(
+                    instIndex, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+                inst = Inst();
+                break;
+            }
+
+            case ColdCCall: {
+                CCallValue* value = inst.origin->as<CCallValue>();
+                Kind oldKind = inst.kind;
+
+                RegisterSet liveRegs = usedRegisters.get(&inst);
+                RegisterSet regsToSave = liveRegs;
+                regsToSave.exclude(RegisterSet::calleeSaveRegisters());
+                regsToSave.exclude(RegisterSet::stackRegisters());
+                regsToSave.exclude(RegisterSet::reservedHardwareRegisters());
+
+                RegisterSet preUsed = regsToSave;
+                Vector<Arg> destinations = computeCCallingConvention(code, value);
+                Tmp result = cCallResult(value->type());
+                Arg originalResult = result ? inst.args[1] : Arg();
+                
+                Vector<ShufflePair> pairs;
+                for (unsigned i = 0; i < destinations.size(); ++i) {
+                    Value* child = value->child(i);
+                    Arg src = inst.args[result ? (i >= 1 ? i + 1 : i) : i ];
+                    Arg dst = destinations[i];
+                    Arg::Width width = Arg::widthForB3Type(child->type());
+                    pairs.append(ShufflePair(src, dst, width));
+
+                    auto excludeRegisters = [&] (Tmp tmp) {
+                        if (tmp.isReg())
+                            preUsed.set(tmp.reg());
+                    };
+                    src.forEachTmpFast(excludeRegisters);
+                    dst.forEachTmpFast(excludeRegisters);
+                }
+
+                std::array<Arg, 2> gpScratch = getScratches(preUsed, Arg::GP);
+                std::array<Arg, 2> fpScratch = getScratches(preUsed, Arg::FP);
+                
+                // Also need to save all live registers. Don't need to worry about the result
+                // register.
+                if (originalResult.isReg())
+                    regsToSave.clear(originalResult.reg());
+                Vector<StackSlot*> stackSlots;
+                regsToSave.forEach(
+                    [&] (Reg reg) {
+                        Tmp tmp(reg);
+                        Arg arg(tmp);
+                        Arg::Width width = Arg::conservativeWidth(arg.type());
+                        StackSlot* stackSlot =
+                            code.addStackSlot(Arg::bytes(width), StackSlotKind::Spill);
+                        pairs.append(ShufflePair(arg, Arg::stack(stackSlot), width));
+                        stackSlots.append(stackSlot);
+                    });
+
+                if (verbose)
+                    dataLog("Pre-call pairs for ", inst, ": ", listDump(pairs), "\n");
+                
+                insertionSet.insertInsts(
+                    instIndex, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+
+                inst = buildCCall(code, inst.origin, destinations);
+                if (oldKind.traps)
+                    inst.kind.traps = true;
+
+                // Now we need to emit code to restore registers.
+                pairs.resize(0);
+                unsigned stackSlotIndex = 0;
+                regsToSave.forEach(
+                    [&] (Reg reg) {
+                        Tmp tmp(reg);
+                        Arg arg(tmp);
+                        Arg::Width width = Arg::conservativeWidth(arg.type());
+                        StackSlot* stackSlot = stackSlots[stackSlotIndex++];
+                        pairs.append(ShufflePair(Arg::stack(stackSlot), arg, width));
+                    });
+                if (result) {
+                    ShufflePair pair(result, originalResult, Arg::widthForB3Type(value->type()));
+                    pairs.append(pair);
+                }
+
+                // For finding scratch registers, we need to account for the possibility that
+                // the result is dead.
+                if (originalResult.isReg())
+                    liveRegs.set(originalResult.reg());
+
+                gpScratch = getScratches(liveRegs, Arg::GP);
+                fpScratch = getScratches(liveRegs, Arg::FP);
+                
+                insertionSet.insertInsts(
+                    instIndex + 1, emitShuffle(code, pairs, gpScratch, fpScratch, inst.origin));
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+
+        insertionSet.execute(block);
+
+        block->insts().removeAllMatching(
+            [&] (Inst& inst) -> bool {
+                return !inst;
+            });
+    }
+
+    if (verbose)
+        dataLog("Code after lowerAfterRegAlloc:\n", code);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirLowerAfterRegAlloc.h b/b3/air/AirLowerAfterRegAlloc.h
new file mode 100644
index 0000000..d8234a7
--- /dev/null
+++ b/b3/air/AirLowerAfterRegAlloc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This lowers Shuffle and ColdCCall instructions. This phase is designed to be run after register
+// allocation.
+
+void lowerAfterRegAlloc(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirLowerEntrySwitch.cpp b/b3/air/AirLowerEntrySwitch.cpp
new file mode 100644
index 0000000..e14641d
--- /dev/null
+++ b/b3/air/AirLowerEntrySwitch.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirLowerEntrySwitch.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void lowerEntrySwitch(Code& code)
+{
+    PhaseScope phaseScope(code, "lowerEntrySwitch");
+    
+    // Figure out the set of blocks that should be duplicated.
+    BlockWorklist worklist;
+    for (BasicBlock* block : code) {
+        if (block->last().kind.opcode == EntrySwitch)
+            worklist.push(block);
+    }
+    
+    // It's possible that we don't have any EntrySwitches. That's fine.
+    if (worklist.seen().isEmpty()) {
+        Vector<FrequentedBlock> entrypoints(code.proc().numEntrypoints(), FrequentedBlock(code[0]));
+        code.setEntrypoints(WTFMove(entrypoints));
+        return;
+    }
+    
+    while (BasicBlock* block = worklist.pop())
+        worklist.pushAll(block->predecessors());
+    
+    RELEASE_ASSERT(worklist.saw(code[0]));
+    
+    Vector<FrequencyClass> entrypointFrequencies(code.proc().numEntrypoints(), FrequencyClass::Rare);
+    for (BasicBlock* block : code) {
+        if (block->last().kind.opcode != EntrySwitch)
+            continue;
+        for (unsigned entrypointIndex = code.proc().numEntrypoints(); entrypointIndex--;) {
+            entrypointFrequencies[entrypointIndex] = maxFrequency(
+                entrypointFrequencies[entrypointIndex],
+                block->successor(entrypointIndex).frequency());
+        }
+    }
+    
+    auto fixEntrySwitch = [&] (BasicBlock* block, unsigned entrypointIndex) {
+        if (block->last().kind.opcode != EntrySwitch)
+            return;
+        FrequentedBlock target = block->successor(entrypointIndex);
+        block->last().kind.opcode = Jump;
+        block->successors().resize(1);
+        block->successor(0) = target;
+    };
+    
+    // Now duplicate them.
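+    // Entrypoint 0 keeps the original blocks; every other entrypoint gets a copy of each block in
+    // the closure, with its EntrySwitch rewritten into a Jump to that entrypoint's successor.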
+    Vector<FrequentedBlock> entrypoints;
+    entrypoints.append(FrequentedBlock(code[0], entrypointFrequencies[0]));
+    IndexMap map(code.size());
+    for (unsigned entrypointIndex = 1; entrypointIndex < code.proc().numEntrypoints(); ++entrypointIndex) {
+        map.clear();
+        for (BasicBlock* block : worklist.seen().values(code))
+            map[block] = code.addBlock(block->frequency());
+        entrypoints.append(FrequentedBlock(map[code[0]], entrypointFrequencies[entrypointIndex]));
+        for (BasicBlock* block : worklist.seen().values(code)) {
+            BasicBlock* newBlock = map[block];
+            for (const Inst& inst : *block)
+                newBlock->appendInst(inst);
+            newBlock->successors() = block->successors();
+            for (BasicBlock*& successor : newBlock->successorBlocks()) {
+                if (BasicBlock* replacement = map[successor])
+                    successor = replacement;
+            }
+            fixEntrySwitch(newBlock, entrypointIndex);
+        }
+    }
+    for (BasicBlock* block : worklist.seen().values(code))
+        fixEntrySwitch(block, 0);
+    
+    code.setEntrypoints(WTFMove(entrypoints));
+    code.resetReachability();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/b3/air/AirLowerEntrySwitch.h b/b3/air/AirLowerEntrySwitch.h
new file mode 100644
index 0000000..ff35007
--- /dev/null
+++ b/b3/air/AirLowerEntrySwitch.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Converts code that seems to have one entrypoint and emulates multiple entrypoints with
+// EntrySwitch into code that really has multiple entrypoints. This is accomplished by duplicating
+// the backwards transitive closure from all EntrySwitches.
+void lowerEntrySwitch(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirLowerMacros.cpp b/b3/air/AirLowerMacros.cpp
new file mode 100644
index 0000000..b086b7b
--- /dev/null
+++ b/b3/air/AirLowerMacros.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirLowerMacros.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCCallingConvention.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include "B3CCallValue.h"
+#include "B3ValueInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void lowerMacros(Code& code)
+{
+    PhaseScope phaseScope(code, "lowerMacros");
+
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+
+            switch (inst.kind.opcode) {
+            case CCall: {
+                CCallValue* value = inst.origin->as<CCallValue>();
+                Kind oldKind = inst.kind;
+
+                Vector<Arg> destinations = computeCCallingConvention(code, value);
+
+                Inst shuffleArguments(Shuffle, value);
+                unsigned offset = value->type() == Void ? 0 : 1;
+                for (unsigned i = 1; i < destinations.size(); ++i) {
+                    Value* child = value->child(i);
+                    shuffleArguments.args.append(inst.args[offset + i]);
+                    shuffleArguments.args.append(destinations[i]);
+                    shuffleArguments.args.append(Arg::widthArg(Arg::widthForB3Type(child->type())));
+                }
+                insertionSet.insertInst(instIndex, WTFMove(shuffleArguments));
+
+                // Indicate that we're using our original callee argument.
+                destinations[0] = inst.args[0];
+
+                // Save where the original instruction put its result.
+                Arg resultDst = value->type() == Void ? Arg() : inst.args[1];
+                
+                inst = buildCCall(code, inst.origin, destinations);
+                if (oldKind.traps)
+                    inst.kind.traps = true;
+
+                Tmp result = cCallResult(value->type());
+                switch (value->type()) {
+                case Void:
+                    break;
+                case Float:
+                    insertionSet.insert(instIndex + 1, MoveFloat, value, result, resultDst);
+                    break;
+                case Double:
+                    insertionSet.insert(instIndex + 1, MoveDouble, value, result, resultDst);
+                    break;
+                case Int32:
+                    insertionSet.insert(instIndex + 1, Move32, value, result, resultDst);
+                    break;
+                case Int64:
+                    insertionSet.insert(instIndex + 1, Move, value, result, resultDst);
+                    break;
+                }
+                break;
+            }
+
+            default:
+                break;
+            }
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirLowerMacros.h b/b3/air/AirLowerMacros.h
new file mode 100644
index 0000000..2dcd76d
--- /dev/null
+++ b/b3/air/AirLowerMacros.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Air has some opcodes that are very high-level and are meant to reduce the amount of low-level
+// knowledge in the B3->Air lowering. The current example is CCall.
+
+void lowerMacros(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
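
For reference, lowerMacros() in the .cpp above decodes the macro CCall form by position: args[0] is the callee, args[1] is the result when the CCallValue's type is not Void, and the remaining args mirror the CCallValue's children from child(1) onward. A hedged sketch of building such an instruction, inferred from those offsets (assumes the Air headers are available; the helper name and its parameters are placeholders, not part of this patch):

    Inst makeMacroCCall(CCallValue* origin, Arg callee, Tmp result, Tmp firstArgument)
    {
        Inst inst(CCall, origin);
        inst.args.append(callee);              // corresponds to child(0): the callee
        if (origin->type() != Void)
            inst.args.append(Arg(result));     // where the lowered call leaves its result
        inst.args.append(Arg(firstArgument));  // corresponds to child(1): first C argument
        return inst;
    }
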
diff --git a/b3/air/AirOpcode.opcodes b/b3/air/AirOpcode.opcodes
new file mode 100644
index 0000000..4170f74
--- /dev/null
+++ b/b3/air/AirOpcode.opcodes
@@ -0,0 +1,931 @@
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Syllabus:
+#
+# Examples of some roles, types, and widths:
+# U:G:32 => use of the low 32 bits of a general-purpose register or value
+# D:G:32 => def of the low 32 bits of a general-purpose register or value
+# UD:G:32 => use and def of the low 32 bits of a general-purpose register or value
+# U:G:64 => use of the low 64 bits of a general-purpose register or value
+# ZD:G:32 => def of all bits of a general-purpose register, where all but the low 32 bits are guaranteed to be zeroed.
+# UA:G:Ptr => UseAddr (see comment in Arg.h)
+# U:F:32 => use of a float register or value
+# U:F:64 => use of a double register or value
+# D:F:32 => def of a float register or value
+# UD:F:32 => use and def of a float register or value
+# S:F:32 => scratch float register.
+#
+# Argument kinds:
+# Tmp => temporary or register
+# Imm => 32-bit immediate int
+# BigImm => TrustedImm64
+# Addr => address as temporary/register+offset
+# Index => BaseIndex address
+# Abs => AbsoluteAddress
+#
+# The parser views these things as keywords, and understands that they fall into two distinct classes
+# of things. So, although this file uses a particular indentation style, none of the whitespace or
+# even newlines are meaningful to the parser. For example, you could write:
+#
+# Foo42 U:G:32, UD:F:32 Imm, Tmp Addr, Tmp
+#
+# And the parser would know that this is the same as:
+#
+# Foo42 U:G:32, UD:F:32
+#     Imm, Tmp
+#     Addr, Tmp
+#
+# I.e. a two-form instruction that uses a GPR or an int immediate and uses+defs a float register.
+#
+# Any opcode or opcode form can be preceded with an architecture list, which restricts the opcode to the
+# union of those architectures. For example, if this is the only overload of the opcode, then it makes the
+# opcode only available on x86_64:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+#     Tmp, Tmp
+#     Tmp, Addr
+#
+# But this only restricts the two-operand form, the other form is allowed on all architectures:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+#     Tmp, Tmp
+#     Tmp, Addr
+# Fuzz UD:G:Ptr, D:G:Ptr, U:F:Ptr
+#     Tmp, Tmp, Tmp
+#     Tmp, Addr, Tmp
+#
+# And you can also restrict individual forms:
+#
+# Thingy UD:G:32, D:G:32
+#     Tmp, Tmp
+#     arm64: Tmp, Addr
+#
+# Additionally, you can have an intersection between the architectures of the opcode overload and the
+# form. In this example, the version that takes an address is only available on armv7 while the other
+# versions are available on armv7 or x86_64:
+#
+# x86_64 armv7: Buzz U:G:32, UD:F:32
+#     Tmp, Tmp
+#     Imm, Tmp
+#     armv7: Addr, Tmp
+#
+# Finally, you can specify architectures using helpful architecture groups. Here are all of the
+# architecture keywords that we support:
+#
+# x86: means x86-32 or x86-64.
+# x86_32: means just x86-32.
+# x86_64: means just x86-64.
+# arm: means armv7 or arm64.
+# armv7: means just armv7.
+# arm64: means just arm64.
+# 32: means x86-32 or armv7.
+# 64: means x86-64 or arm64.
+
+# Note that the opcodes here have a leading capital (Add32) but must correspond to MacroAssembler
+# API that has a leading lower-case (add32).
+
+Nop
+
+Add32 U:G:32, U:G:32, ZD:G:32
+    Imm, Tmp, Tmp
+    Tmp, Tmp, Tmp
+
+Add32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Addr
+    x86: Imm, Index
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+    x86: Tmp, Index
+
+x86: Add8 U:G:8, UD:G:8
+    Imm, Addr
+    Imm, Index
+    Tmp, Addr
+    Tmp, Index
+
+x86: Add16 U:G:16, UD:G:16
+    Imm, Addr
+    Imm, Index
+    Tmp, Addr
+    Tmp, Index
+
+64: Add64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Addr
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+
+64: Add64 U:G:64, U:G:64, D:G:64
+    Imm, Tmp, Tmp
+    Tmp, Tmp, Tmp
+
+AddDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: AddDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+AddFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: AddFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+Sub32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Addr
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+
+arm64: Sub32 U:G:32, U:G:32, D:G:32
+    Tmp, Tmp, Tmp
+
+64: Sub64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Addr
+    Imm, Tmp
+    x86: Addr, Tmp
+    x86: Tmp, Addr
+
+arm64: Sub64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+SubDouble U:F:64, U:F:64, D:F:64
+    arm64: Tmp, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Tmp, Index, Tmp
+
+x86: SubDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+SubFloat U:F:32, U:F:32, D:F:32
+    arm64: Tmp, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Tmp, Index, Tmp
+
+x86: SubFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+Neg32 UZD:G:32
+    Tmp
+    x86: Addr
+
+64: Neg64 UD:G:64
+    Tmp
+
+arm64: NegateDouble U:F:64, D:F:64
+    Tmp, Tmp
+
+arm64: NegateFloat U:F:32, D:F:32
+    Tmp, Tmp
+
+Mul32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+Mul32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Imm, Tmp, Tmp
+
+64: Mul64 U:G:64, UD:G:64
+    Tmp, Tmp
+
+Mul64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd64 U:G:64, U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub64 U:G:64, U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg64 U:G:64, U:G:64, ZD:G:64
+    Tmp, Tmp, Tmp
+
+arm64: Div32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: UDiv32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: Div64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+arm64: UDiv64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+MulDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: MulDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+MulFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+    x86: Addr, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Index, Tmp, Tmp
+
+x86: MulFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+arm64: DivDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: DivDouble U:F:64, UD:F:64
+    Tmp, Tmp
+    Addr, Tmp
+
+arm64: DivFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: DivFloat U:F:32, UD:F:32
+    Tmp, Tmp
+    Addr, Tmp
+
+x86: X86ConvertToDoubleWord32 U:G:32, ZD:G:32
+    Tmp*, Tmp*
+
+x86_64: X86ConvertToQuadWord64 U:G:64, D:G:64
+    Tmp*, Tmp*
+
+x86: X86Div32 UZD:G:32, UZD:G:32, U:G:32
+    Tmp*, Tmp*, Tmp
+
+x86: X86UDiv32 UZD:G:32, UZD:G:32, U:G:32
+    Tmp*, Tmp*, Tmp
+
+x86_64: X86Div64 UZD:G:64, UZD:G:64, U:G:64
+    Tmp*, Tmp*, Tmp
+
+x86_64: X86UDiv64 UZD:G:64, UZD:G:64, U:G:64
+    Tmp*, Tmp*, Tmp
+
+Lea32 UA:G:32, D:G:32
+    Addr, Tmp
+    x86: Index, Tmp as x86Lea32
+
+Lea64 UA:G:64, D:G:64
+    Addr, Tmp
+    x86: Index, Tmp as x86Lea64
+
+And32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    arm64: BitImm, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Addr, Tmp, Tmp
+
+And32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Tmp
+    x86: Tmp, Addr
+    x86: Addr, Tmp
+    x86: Imm, Addr
+
+64: And64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    arm64: BitImm64, Tmp, Tmp
+
+x86_64: And64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Tmp
+
+AndDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: AndDouble U:F:64, UD:F:64
+    Tmp, Tmp
+
+AndFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: AndFloat U:F:32, UD:F:32
+    Tmp, Tmp
+
+x86: XorDouble U:F:64, U:F:64, D:F:64
+    Tmp, Tmp, Tmp
+
+x86: XorDouble U:F:64, UD:F:64
+    Tmp, Tmp
+
+x86: XorFloat U:F:32, U:F:32, D:F:32
+    Tmp, Tmp, Tmp
+
+x86: XorFloat U:F:32, UD:F:32
+    Tmp, Tmp
+
+arm64: Lshift32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86: Lshift32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Lshift64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: Lshift64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Rshift32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86: Rshift32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Rshift64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: Rshift64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Urshift32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86: Urshift32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: Urshift64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: Urshift64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+x86_64: RotateRight32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: RotateRight32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: RotateRight64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+arm64: RotateRight64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    Tmp, Imm, Tmp
+
+x86_64: RotateLeft32 U:G:32, UZD:G:32
+    Tmp*, Tmp
+    Imm, Tmp
+
+x86_64: RotateLeft64 U:G:64, UD:G:64
+    Tmp*, Tmp
+    Imm, Tmp
+
+Or32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    arm64: BitImm, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Addr, Tmp, Tmp
+
+Or32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Tmp
+    x86: Tmp, Addr
+    x86: Addr, Tmp
+    x86: Imm, Addr
+
+64: Or64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    arm64: BitImm64, Tmp, Tmp
+
+64: Or64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Imm, Tmp
+
+Xor32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+    arm64: BitImm, Tmp, Tmp
+    x86: Tmp, Addr, Tmp
+    x86: Addr, Tmp, Tmp
+
+Xor32 U:G:32, UZD:G:32
+    Tmp, Tmp
+    x86: Imm, Tmp
+    x86: Tmp, Addr
+    x86: Addr, Tmp
+    x86: Imm, Addr
+
+64: Xor64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+    arm64: BitImm64, Tmp, Tmp
+
+64: Xor64 U:G:64, UD:G:64
+    Tmp, Tmp
+    x86: Tmp, Addr
+    x86: Imm, Tmp
+
+arm64: Not32 U:G:32, ZD:G:32
+    Tmp, Tmp
+
+x86: Not32 UZD:G:32
+    Tmp
+    Addr
+
+arm64: Not64 U:G:64, D:G:64
+    Tmp, Tmp
+
+x86: Not64 UD:G:64
+    Tmp
+    Addr
+
+arm64: AbsDouble U:F:64, D:F:64
+    Tmp, Tmp
+
+arm64: AbsFloat U:F:32, D:F:32
+    Tmp, Tmp
+
+CeilDouble U:F:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+CeilFloat U:F:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+FloorDouble U:F:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+FloorFloat U:F:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+SqrtDouble U:F:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+SqrtFloat U:F:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+ConvertInt32ToDouble U:G:32, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+64: ConvertInt64ToDouble U:G:64, D:F:64
+    Tmp, Tmp
+    x86_64: Addr, Tmp
+
+ConvertInt32ToFloat U:G:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+64: ConvertInt64ToFloat U:G:64, D:F:32
+    Tmp, Tmp
+    x86_64: Addr, Tmp
+
+CountLeadingZeros32 U:G:32, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+64: CountLeadingZeros64 U:G:64, D:G:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+ConvertDoubleToFloat U:F:64, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+ConvertFloatToDouble U:F:32, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp
+
+# Note that Move operates over the full register size, which is either 32-bit or 64-bit depending on
+# the platform. I'm not entirely sure that this is a good thing; it might be better to just have a
+# Move64 instruction. OTOH, our MacroAssemblers already have this notion of "move()" that basically
+# means movePtr.
+Move U:G:Ptr, D:G:Ptr
+    Tmp, Tmp
+    Imm, Tmp as signExtend32ToPtr
+    BigImm, Tmp
+    Addr, Tmp as loadPtr # This means that "Move Addr, Tmp" is code-generated as "load" not "move".
+    Index, Tmp as loadPtr
+    Tmp, Addr as storePtr
+    Tmp, Index as storePtr
+    x86: Imm, Addr as storePtr
+
+x86: Swap32 UD:G:32, UD:G:32
+    Tmp, Tmp
+    Tmp, Addr
+
+x86_64: Swap64 UD:G:64, UD:G:64
+    Tmp, Tmp
+    Tmp, Addr
+
+Move32 U:G:32, ZD:G:32
+    Tmp, Tmp as zeroExtend32ToPtr
+    Addr, Tmp as load32
+    Index, Tmp as load32
+    Tmp, Addr as store32
+    Tmp, Index as store32
+    x86: Imm, Tmp as zeroExtend32ToPtr
+    x86: Imm, Addr as store32
+    x86: Imm, Index as store32
+
+StoreZero32 U:G:32
+    Addr
+    Index
+
+SignExtend32ToPtr U:G:32, D:G:Ptr
+    Tmp, Tmp
+
+ZeroExtend8To32 U:G:8, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load8
+    x86: Index, Tmp as load8
+
+SignExtend8To32 U:G:8, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load8SignedExtendTo32
+    x86: Index, Tmp as load8SignedExtendTo32
+
+ZeroExtend16To32 U:G:16, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load16
+    x86: Index, Tmp as load16
+
+SignExtend16To32 U:G:16, ZD:G:32
+    Tmp, Tmp
+    x86: Addr, Tmp as load16SignedExtendTo32
+    x86: Index, Tmp as load16SignedExtendTo32
+
+MoveFloat U:F:32, D:F:32
+    Tmp, Tmp as moveDouble
+    Addr, Tmp as loadFloat
+    Index, Tmp as loadFloat
+    Tmp, Addr as storeFloat
+    Tmp, Index as storeFloat
+
+MoveDouble U:F:64, D:F:64
+    Tmp, Tmp
+    Addr, Tmp as loadDouble
+    Index, Tmp as loadDouble
+    Tmp, Addr as storeDouble
+    Tmp, Index as storeDouble
+
+MoveZeroToDouble D:F:64
+    Tmp
+
+64: Move64ToDouble U:G:64, D:F:64
+    Tmp, Tmp
+    x86: Addr, Tmp as loadDouble
+    Index, Tmp as loadDouble
+
+Move32ToFloat U:G:32, D:F:32
+    Tmp, Tmp
+    x86: Addr, Tmp as loadFloat
+    Index, Tmp as loadFloat
+
+64: MoveDoubleTo64 U:F:64, D:G:64
+    Tmp, Tmp
+    Addr, Tmp as load64
+    Index, Tmp as load64
+
+MoveFloatTo32 U:F:32, D:G:32
+    Tmp, Tmp
+    Addr, Tmp as load32
+    Index, Tmp as load32
+
+Load8 U:G:8, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Store8 U:G:8, D:G:8
+    Tmp, Index
+    Tmp, Addr
+    x86: Imm, Index
+    x86: Imm, Addr
+
+Load8SignedExtendTo32 U:G:8, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Load16 U:G:16, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Load16SignedExtendTo32 U:G:16, ZD:G:32
+    Addr, Tmp
+    Index, Tmp
+
+Store16 U:G:16, D:G:16
+    Tmp, Index
+    Tmp, Addr
+
+Compare32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    RelCond, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp
+
+64: Compare64 U:G:32, U:G:64, U:G:64, ZD:G:32
+    RelCond, Tmp, Tmp, Tmp
+    x86: RelCond, Tmp, Imm, Tmp
+
+Test32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    x86: ResCond, Addr, Imm, Tmp
+    ResCond, Tmp, Tmp, Tmp
+    ResCond, Tmp, BitImm, Tmp
+
+64: Test64 U:G:32, U:G:64, U:G:64, ZD:G:32
+    x86: ResCond, Tmp, Imm, Tmp
+    ResCond, Tmp, Tmp, Tmp
+
+CompareDouble U:G:32, U:F:64, U:F:64, ZD:G:32
+    DoubleCond, Tmp, Tmp, Tmp
+
+CompareFloat U:G:32, U:F:32, U:F:32, ZD:G:32
+    DoubleCond, Tmp, Tmp, Tmp
+
+# Note that branches have some logic in AirOptimizeBlockOrder.cpp. If you add new branches, please make sure
+# you opt them into the block order optimizations.
+
+Branch8 U:G:32, U:G:8, U:G:8 /branch
+    x86: RelCond, Addr, Imm
+    x86: RelCond, Index, Imm
+
+Branch32 U:G:32, U:G:32, U:G:32 /branch
+    x86: RelCond, Addr, Imm
+    RelCond, Tmp, Tmp
+    RelCond, Tmp, Imm
+    x86: RelCond, Tmp, Addr
+    x86: RelCond, Addr, Tmp
+    x86: RelCond, Index, Imm
+
+64: Branch64 U:G:32, U:G:64, U:G:64 /branch
+    RelCond, Tmp, Tmp
+    RelCond, Tmp, Imm
+    x86: RelCond, Tmp, Addr
+    x86: RelCond, Addr, Tmp
+    x86: RelCond, Addr, Imm
+    x86: RelCond, Index, Tmp
+
+BranchTest8 U:G:32, U:G:8, U:G:8 /branch
+    x86: ResCond, Addr, BitImm
+    x86: ResCond, Index, BitImm
+
+BranchTest32 U:G:32, U:G:32, U:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Tmp, BitImm
+    x86: ResCond, Addr, BitImm
+    x86: ResCond, Index, BitImm
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# BranchTest32 in most cases where you use an immediate.
+64: BranchTest64 U:G:32, U:G:64, U:G:64 /branch
+    ResCond, Tmp, Tmp
+    arm64: ResCond, Tmp, BitImm64
+    x86: ResCond, Tmp, BitImm
+    x86: ResCond, Addr, BitImm
+    x86: ResCond, Addr, Tmp
+    x86: ResCond, Index, BitImm
+
+BranchDouble U:G:32, U:F:64, U:F:64 /branch
+    DoubleCond, Tmp, Tmp
+
+BranchFloat U:G:32, U:F:32, U:F:32 /branch
+    DoubleCond, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+    ResCond, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Addr, Tmp
+    x86: ResCond, Addr, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, UZD:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Imm, Tmp
+    x86: ResCond, Imm, Addr
+    x86: ResCond, Tmp, Addr
+    x86: ResCond, Addr, Tmp
+
+BranchAdd64 U:G:32, U:G:64, U:G:64, ZD:G:64 /branch
+    ResCond, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Addr, Tmp
+    x86: ResCond, Addr, Tmp, Tmp
+
+64: BranchAdd64 U:G:32, U:G:64, UD:G:64 /branch
+    ResCond, Imm, Tmp
+    ResCond, Tmp, Tmp
+    x86: ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, UZD:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+    ResCond, Tmp, Imm, Tmp
+
+arm64: BranchMul32 U:G:32, U:G:32, U:G:32, S:G:32, S:G:32, ZD:G:32 /branch
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+x86_64: BranchMul64 U:G:32, U:G:64, UZD:G:64 /branch
+    ResCond, Tmp, Tmp
+
+arm64: BranchMul64 U:G:32, U:G:64, U:G:64, S:G:64, S:G:64, ZD:G:64 /branch
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+BranchSub32 U:G:32, U:G:32, UZD:G:32 /branch
+    ResCond, Tmp, Tmp
+    ResCond, Imm, Tmp
+    x86: ResCond, Imm, Addr
+    x86: ResCond, Tmp, Addr
+    x86: ResCond, Addr, Tmp
+
+64: BranchSub64 U:G:32, U:G:64, UD:G:64 /branch
+    ResCond, Imm, Tmp
+    ResCond, Tmp, Tmp
+
+BranchNeg32 U:G:32, UZD:G:32 /branch
+    ResCond, Tmp
+
+64: BranchNeg64 U:G:32, UZD:G:64 /branch
+    ResCond, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp
+    x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, UD:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, UD:G:Ptr
+    DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionally32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+    x86: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+    x86: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+    x86: RelCond, Index, Imm, Tmp, Tmp, Tmp
+
+64: MoveDoubleConditionally64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+    RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+    x86_64: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+    x86_64: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+    x86_64: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86_64: RelCond, Index, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyTest32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+    x86: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# MoveDoubleConditionallyTest32 in most cases where you use an immediate.
+64: MoveDoubleConditionallyTest64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+    ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+    x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+    x86_64: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+    x86_64: ResCond, Addr, Tmp, Tmp, Tmp, Tmp
+    x86_64: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyDouble U:G:32, U:F:64, U:F:64, U:F:64, U:F:64, D:F:64
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyFloat U:G:32, U:F:32, U:F:32, U:F:64, U:F:64, D:F:64
+    DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MemoryFence /effects
+StoreFence /effects
+LoadFence /effects
+
+Jump /branch
+
+RetVoid /return
+
+Ret32 U:G:32 /return
+    Tmp
+
+64: Ret64 U:G:64 /return
+    Tmp
+
+RetFloat U:F:32 /return
+    Tmp
+
+RetDouble U:F:64 /return
+    Tmp
+
+Oops /terminal
+
+# This is a terminal but we express it as a Custom because we don't want it to have a code
+# generator.
+custom EntrySwitch
+
+# A Shuffle is a multi-source, multi-destination move. It simultaneously does multiple moves at once.
+# The moves are specified as triplets of src, dst, and width. For example you can request a swap this
+# way:
+#     Shuffle %tmp1, %tmp2, 64, %tmp2, %tmp1, 64
+custom Shuffle
+
+# Air allows for exotic behavior. A Patch's behavior is determined entirely by the Special operand,
+# which must be the first operand.
+custom Patch
+
+# Instructions used for lowering C calls. These don't make it to Air generation. They get lowered to
+# something else first. The origin Value must be a CCallValue.
+custom CCall
+custom ColdCCall
+
+# This is a special wasm opcode that branches to a trap handler. It uses the generator attached to Air::Code
+# to produce the side-exit code.
+custom WasmBoundsCheck
+
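
The Shuffle opcode described above takes (src, dst, width) triplets; lowerMacros() uses the same convention when it shuffles C arguments into their calling-convention slots. A hedged C++ sketch of building the swap from the comment (assumes the Air headers are available; `makeSwap` and its parameters are placeholders):

    Inst makeSwap(Value* origin, Tmp tmp1, Tmp tmp2)
    {
        Inst shuffle(Shuffle, origin);
        // First triplet: tmp1 -> tmp2, 64 bits wide.
        shuffle.args.append(Arg(tmp1));
        shuffle.args.append(Arg(tmp2));
        shuffle.args.append(Arg::widthArg(Arg::Width64));
        // Second triplet: tmp2 -> tmp1, 64 bits wide. Both moves happen "at once".
        shuffle.args.append(Arg(tmp2));
        shuffle.args.append(Arg(tmp1));
        shuffle.args.append(Arg::widthArg(Arg::Width64));
        return shuffle;
    }
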
diff --git a/b3/air/AirOptimizeBlockOrder.cpp b/b3/air/AirOptimizeBlockOrder.cpp
new file mode 100644
index 0000000..11ca3f3
--- /dev/null
+++ b/b3/air/AirOptimizeBlockOrder.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirOptimizeBlockOrder.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+class SortedSuccessors {
+public:
+    SortedSuccessors()
+    {
+    }
+
+    void append(BasicBlock* block)
+    {
+        m_successors.append(block);
+    }
+
+    void process(BlockWorklist& worklist)
+    {
+        // We prefer a stable sort, and we don't want it to go off the rails if we see NaN. Also, the number
+        // of successors is bounded. In fact, it currently cannot be more than 2. :-)
+        bubbleSort(
+            m_successors.begin(), m_successors.end(),
+            [] (BasicBlock* left, BasicBlock* right) {
+                return left->frequency() < right->frequency();
+            });
+
+        // Pushing the successors in ascending order of frequency ensures that the very next block we visit
+        // is our highest-frequency successor (unless that successor has already been visited).
+        for (unsigned i = 0; i < m_successors.size(); ++i)
+            worklist.push(m_successors[i]);
+        
+        m_successors.resize(0);
+    }
+
+private:
+    Vector<BasicBlock*> m_successors;
+};
+
+} // anonymous namespace
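
The trick in SortedSuccessors::process() above is that pushing successors onto a LIFO worklist in ascending frequency order makes the hottest successor the next one popped. A standalone sketch of that idea (plain C++, not JSC code):

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main()
    {
        // (name, frequency) pairs standing in for FrequentedBlock successors.
        std::vector<std::pair<const char*, double>> successors = {
            { "slowPath", 0.1 }, { "fastPath", 0.9 }
        };

        // Stable sort by ascending frequency, like SortedSuccessors::process().
        std::stable_sort(successors.begin(), successors.end(),
            [] (const auto& a, const auto& b) { return a.second < b.second; });

        // Push in that order onto a LIFO worklist; the hottest successor is popped first.
        std::vector<const char*> worklist;
        for (const auto& successor : successors)
            worklist.push_back(successor.first);
        std::printf("laid out next: %s\n", worklist.back()); // prints "fastPath"
        return 0;
    }
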
+
+Vector<BasicBlock*> blocksInOptimizedOrder(Code& code)
+{
+    Vector<BasicBlock*> blocksInOrder;
+
+    BlockWorklist fastWorklist;
+    SortedSuccessors sortedSuccessors;
+    SortedSuccessors sortedSlowSuccessors;
+    
+    // We expect entrypoint lowering to have already happened.
+    RELEASE_ASSERT(code.numEntrypoints());
+
+    auto appendSuccessor = [&] (const FrequentedBlock& block) {
+        if (block.isRare())
+            sortedSlowSuccessors.append(block.block());
+        else
+            sortedSuccessors.append(block.block());
+    };
+    
+    // For everything but the first entrypoint, we push them in order of frequency and frequency
+    // class.
+    for (unsigned i = 1; i < code.numEntrypoints(); ++i)
+        appendSuccessor(code.entrypoint(i));
+    
+    // Always push the primary entrypoint last so that it gets highest priority.
+    fastWorklist.push(code.entrypoint(0).block());
+    
+    while (BasicBlock* block = fastWorklist.pop()) {
+        blocksInOrder.append(block);
+        for (FrequentedBlock& successor : block->successors())
+            appendSuccessor(successor);
+        sortedSuccessors.process(fastWorklist);
+    }
+
+    BlockWorklist slowWorklist;
+    sortedSlowSuccessors.process(slowWorklist);
+
+    while (BasicBlock* block = slowWorklist.pop()) {
+        // We might have already processed this block.
+        if (fastWorklist.saw(block))
+            continue;
+        
+        blocksInOrder.append(block);
+        for (BasicBlock* successor : block->successorBlocks())
+            sortedSuccessors.append(successor);
+        sortedSuccessors.process(slowWorklist);
+    }
+
+    ASSERT(fastWorklist.isEmpty());
+    ASSERT(slowWorklist.isEmpty());
+
+    return blocksInOrder;
+}
+
+void optimizeBlockOrder(Code& code)
+{
+    PhaseScope phaseScope(code, "optimizeBlockOrder");
+
+    Vector<BasicBlock*> blocksInOrder = blocksInOptimizedOrder(code);
+    
+    // Place blocks into Code's block list according to the ordering in blocksInOrder. We do this by leaking
+    // all of the blocks and then readopting them.
+    for (auto& entry : code.blockList())
+        entry.release();
+
+    code.blockList().resize(0);
+
+    for (unsigned i = 0; i < blocksInOrder.size(); ++i) {
+        BasicBlock* block = blocksInOrder[i];
+        block->setIndex(i);
+        code.blockList().append(std::unique_ptr<BasicBlock>(block));
+    }
+
+    // Finally, flip any branches that we recognize. It's best if the taken successor does not point
+    // at the next block.
+    for (BasicBlock* block : code) {
+        Inst& branch = block->last();
+
+        // It's somewhat tempting to just say that if the block has two successors and the first arg is
+        // invertible, then we can do the optimization. But that's wagging the dog. The fact that an
+        // instruction happens to have an argument that is invertible doesn't mean it's a branch, even though
+        // it is true that currently only branches have invertible arguments. It's also tempting to say that
+        // the /branch flag in AirOpcode.opcodes tells us that something is a branch - except that there,
+        // /branch also means Jump. The approach taken here means that if you add new branch instructions and
+        // forget about this phase, then at worst your new instructions won't opt into the inversion
+        // optimization.  You'll probably realize that as soon as you look at the disassembly, and it
+        // certainly won't cause any correctness issues.
+        
+        switch (branch.kind.opcode) {
+        case Branch8:
+        case Branch32:
+        case Branch64:
+        case BranchTest8:
+        case BranchTest32:
+        case BranchTest64:
+        case BranchFloat:
+        case BranchDouble:
+        case BranchAdd32:
+        case BranchAdd64:
+        case BranchMul32:
+        case BranchMul64:
+        case BranchSub32:
+        case BranchSub64:
+        case BranchNeg32:
+        case BranchNeg64:
+            if (code.findNextBlock(block) == block->successorBlock(0) && branch.args[0].isInvertible()) {
+                std::swap(block->successor(0), block->successor(1));
+                branch.args[0] = branch.args[0].inverted();
+            }
+            break;
+            
+        default:
+            break;
+        }
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
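
optimizeBlockOrder() above reorders Code's block list without copying blocks by releasing every unique_ptr, permuting the raw pointers, and re-adopting them. A standalone sketch of the same ownership dance (plain C++, not JSC code; `Block` and `reorder` are illustrative names only):

    #include <memory>
    #include <vector>

    struct Block { unsigned index; };

    void reorder(std::vector<std::unique_ptr<Block>>& list, const std::vector<Block*>& newOrder)
    {
        // Leak: every block is now owned (conceptually) by newOrder.
        for (auto& entry : list)
            entry.release();
        list.clear();

        // Readopt in the new order, fixing up each block's index as we go.
        for (unsigned i = 0; i < newOrder.size(); ++i) {
            Block* block = newOrder[i];
            block->index = i;
            list.push_back(std::unique_ptr<Block>(block));
        }
    }
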
diff --git a/b3/air/AirOptimizeBlockOrder.h b/b3/air/AirOptimizeBlockOrder.h
new file mode 100644
index 0000000..3911fcc
--- /dev/null
+++ b/b3/air/AirOptimizeBlockOrder.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include <wtf/Vector.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class BasicBlock;
+class Code;
+
+// Returns a list of blocks sorted according to what would be the current optimal order. This shares
+// some properties with a pre-order traversal. In particular, each block will appear after at least
+// one of its predecessors.
+Vector<BasicBlock*> blocksInOptimizedOrder(Code&);
+
+// Reorders the basic blocks to keep hot blocks at the top, and maximize the likelihood that a frequently
+// taken edge is just a fall-through.
+
+void optimizeBlockOrder(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirPadInterference.cpp b/b3/air/AirPadInterference.cpp
new file mode 100644
index 0000000..91de56b
--- /dev/null
+++ b/b3/air/AirPadInterference.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirPadInterference.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void padInterference(Code& code)
+{
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        bool prevHadLate = false;
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            Inst& inst = block->at(instIndex);
+            
+            bool hasEarlyDef = false;
+            bool hasLate = false;
+            inst.forEachArg(
+                [&] (Arg&, Arg::Role role, Arg::Type, Arg::Width) {
+                    switch (role) {
+                    case Arg::EarlyDef:
+                        hasEarlyDef = true;
+                        break;
+                    case Arg::LateUse:
+                    case Arg::Def:
+                    case Arg::ZDef:
+                    case Arg::LateColdUse:
+                    case Arg::UseDef:
+                    case Arg::UseZDef:
+                        hasLate = true;
+                        break;
+                    case Arg::Scratch:
+                        hasEarlyDef = true;
+                        hasLate = true;
+                        break;
+                    case Arg::Use:
+                    case Arg::ColdUse:
+                    case Arg::UseAddr:
+                        break;
+                    }
+                });
+            if (inst.kind.opcode == Patch) {
+                hasEarlyDef |= !inst.extraEarlyClobberedRegs().isEmpty();
+                hasLate |= !inst.extraClobberedRegs().isEmpty();
+            }
+            
+            if (hasEarlyDef && prevHadLate)
+                insertionSet.insert(instIndex, Nop, inst.origin);
+            
+            prevHadLate = hasLate;
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirPadInterference.h b/b3/air/AirPadInterference.h
new file mode 100644
index 0000000..18f8083
--- /dev/null
+++ b/b3/air/AirPadInterference.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This isn't a phase - it's meant to be a utility that other phases use. Air reasons about liveness by
+// reasoning about interference at boundaries between instructions. This can go wrong - for example, a
+// late use in one instruction doesn't actually interfere with an early def of the next instruction, but
+// Air thinks that it does. This is convenient because it works great in the most common case: early uses
+// and late defs. In practice, only the register allocators need to use this, since only they need to be
+// able to color the interference graph using a bounded number of colors.
+//
+// See https://bugs.webkit.org/show_bug.cgi?id=163548#c2 for more info.
+
+void padInterference(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
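A standalone illustration of the boundary model that the header comment above describes (plain C++, not JSC code): liveness is reasoned about at the boundaries between instructions, so the late actions of instruction i and the early actions of instruction i + 1 fall on the same boundary and look like they interfere; padInterference() separates them with a Nop.

    #include <cstdio>

    // Late actions of instruction i are modeled at boundary i + 1; early actions at boundary i.
    int boundaryOfLateAction(int instIndex) { return instIndex + 1; }
    int boundaryOfEarlyAction(int instIndex) { return instIndex; }

    int main()
    {
        // Instruction 0 ends with a late use of %x; instruction 1 starts with an early def of %y.
        // They land on the same boundary, so Air conservatively sees interference.
        std::printf("adjacent: %d\n", boundaryOfLateAction(0) == boundaryOfEarlyAction(1)); // 1

        // After a Nop is inserted at index 1, the old instruction 1 is now instruction 2,
        // so its early def sits on a later boundary and the false overlap disappears.
        std::printf("padded:   %d\n", boundaryOfLateAction(0) == boundaryOfEarlyAction(2)); // 0
        return 0;
    }
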
diff --git a/b3/air/AirPhaseScope.cpp b/b3/air/AirPhaseScope.cpp
new file mode 100644
index 0000000..062ea24
--- /dev/null
+++ b/b3/air/AirPhaseScope.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirPhaseScope.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirValidate.h"
+#include "B3Common.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+PhaseScope::PhaseScope(Code& code, const char* name)
+    : m_code(code)
+    , m_name(name)
+    , m_timingScope(name)
+{
+    if (shouldDumpIRAtEachPhase(AirMode)) {
+        dataLog("Air after ", code.lastPhaseName(), ", before ", name, ":\n");
+        dataLog(code);
+    }
+
+    if (shouldSaveIRBeforePhase())
+        m_dumpBefore = toCString(code);
+}
+
+PhaseScope::~PhaseScope()
+{
+    m_code.setLastPhaseName(m_name);
+    if (shouldValidateIRAtEachPhase())
+        validate(m_code, m_dumpBefore.data());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
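
Typical use of PhaseScope, mirroring the phases in this patch: construct it at the top of the phase so the IR is dumped before the phase runs (when requested) and validated when the scope is destroyed. This is a minimal sketch assuming the Air headers are available; `frobnicateBlock` is a placeholder, not a real JSC function.

    void myPhase(Code& code)
    {
        PhaseScope phaseScope(code, "myPhase"); // may dump IR before the phase runs...
        for (BasicBlock* block : code)
            frobnicateBlock(block);
        // ...and validates the IR when phaseScope is destroyed at the end of the phase.
    }
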
diff --git a/b3/air/AirPhaseScope.h b/b3/air/AirPhaseScope.h
new file mode 100644
index 0000000..71f788f
--- /dev/null
+++ b/b3/air/AirPhaseScope.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "B3TimingScope.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/text/CString.h>
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+class PhaseScope {
+    WTF_MAKE_NONCOPYABLE(PhaseScope);
+public:
+    PhaseScope(Code&, const char* name);
+    ~PhaseScope(); // this does validation
+
+private:
+    Code& m_code;
+    const char* m_name;
+    TimingScope m_timingScope;
+    CString m_dumpBefore;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirReportUsedRegisters.cpp b/b3/air/AirReportUsedRegisters.cpp
new file mode 100644
index 0000000..bb0aeab
--- /dev/null
+++ b/b3/air/AirReportUsedRegisters.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirReportUsedRegisters.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void reportUsedRegisters(Code& code)
+{
+    PhaseScope phaseScope(code, "reportUsedRegisters");
+
+    RegLiveness liveness(code);
+
+    for (BasicBlock* block : code) {
+        RegLiveness::LocalCalc localCalc(liveness, block);
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            Inst& inst = block->at(instIndex);
+
+            // Kill dead assignments to registers. For simplicity we say that a store is killable if
+            // it has only late defs and those late defs are to registers that are dead right now.
+            if (!inst.hasNonArgEffects()) {
+                bool canDelete = true;
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                        if (Arg::isEarlyDef(role)) {
+                            canDelete = false;
+                            return;
+                        }
+                        if (!Arg::isLateDef(role))
+                            return;
+                        if (!arg.isReg()) {
+                            canDelete = false;
+                            return;
+                        }
+                        if (localCalc.isLive(arg.reg())) {
+                            canDelete = false;
+                            return;
+                        }
+                    });
+                if (canDelete)
+                    inst = Inst();
+            }
+            
+            if (inst.kind.opcode == Patch) {
+                RegisterSet registerSet;
+                for (Reg reg : localCalc.live())
+                    registerSet.set(reg);
+                inst.reportUsedRegisters(registerSet);
+            }
+            localCalc.execute(instIndex);
+        }
+        
+        block->insts().removeAllMatching(
+            [&] (const Inst& inst) -> bool {
+                return !inst;
+            });
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
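The dead-assignment elimination in reportUsedRegisters() follows the usual backwards scan: at each instruction, liveness reflects the program point just after it, so a side-effect-free instruction whose only (late) defs target dead registers can be dropped before the live set is updated. A standalone sketch of that scan (plain C++, not JSC code; `MiniInst` is an illustrative stand-in for Air's Inst):

    #include <cstddef>
    #include <set>
    #include <vector>

    struct MiniInst {
        std::vector<int> uses;
        std::vector<int> defs;
        bool hasSideEffects = false;
    };

    void scanBlock(std::vector<MiniInst>& block, std::set<int> liveAtEnd)
    {
        std::set<int> live = std::move(liveAtEnd);
        for (std::size_t instIndex = block.size(); instIndex--;) {
            MiniInst& inst = block[instIndex];

            // Kill the assignment if it has no side effects and all of its defs are dead here.
            bool canDelete = !inst.hasSideEffects && !inst.defs.empty();
            for (int def : inst.defs)
                canDelete &= !live.count(def);
            if (canDelete)
                inst = MiniInst(); // analogous to "inst = Inst()" above

            // Update liveness across the instruction: defs die, uses become live.
            for (int def : inst.defs)
                live.erase(def);
            for (int use : inst.uses)
                live.insert(use);
        }
    }
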
diff --git a/b3/air/AirReportUsedRegisters.h b/b3/air/AirReportUsedRegisters.h
new file mode 100644
index 0000000..ea175dc
--- /dev/null
+++ b/b3/air/AirReportUsedRegisters.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Performs a liveness analysis over registers and reports the live registers to every Special. Takes
+// the opportunity to kill dead assignments to registers, since it has access to register liveness.
+
+void reportUsedRegisters(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirSimplifyCFG.cpp b/b3/air/AirSimplifyCFG.cpp
new file mode 100644
index 0000000..c66f63f
--- /dev/null
+++ b/b3/air/AirSimplifyCFG.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirSimplifyCFG.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirPhaseScope.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+bool simplifyCFG(Code& code)
+{
+    const bool verbose = false;
+    
+    PhaseScope phaseScope(code, "simplifyCFG");
+    
+    // We have three easy simplification rules:
+    //
+    // 1) If a successor is a block that just jumps to another block, then jump directly to
+    //    that block.
+    //
+    // 2) If all successors are the same and the operation has no effects, then use a jump
+    //    instead.
+    //
+    // 3) If you jump to a block that is not you and has one predecessor, then merge.
+    //
+    // Note that because of the first rule, this phase may introduce critical edges. That's fine.
+    // If you need broken critical edges, then you have to break them yourself.
+
+    bool result = false;
+    for (;;) {
+        if (verbose) {
+            dataLog("Air before an iteration of simplifyCFG:\n");
+            dataLog(code);
+        }
+        
+        bool changed = false;
+        for (BasicBlock* block : code) {
+            // We rely on predecessors being conservatively correct. Verify this here.
+            if (shouldValidateIRAtEachPhase()) {
+                for (BasicBlock* block : code) {
+                    for (BasicBlock* successor : block->successorBlocks())
+                        RELEASE_ASSERT(successor->containsPredecessor(block));
+                }
+            }
+
+            // We don't care about blocks that don't have successors.
+            if (!block->numSuccessors())
+                continue;
+
+            // First check if any of the successors of this block can be forwarded over.
+            for (BasicBlock*& successor : block->successorBlocks()) {
+                if (successor != block
+                    && successor->size() == 1
+                    && successor->last().kind.opcode == Jump) {
+                    BasicBlock* newSuccessor = successor->successorBlock(0);
+                    if (newSuccessor != successor) {
+                        if (verbose) {
+                            dataLog(
+                                "Replacing ", pointerDump(block), "->", pointerDump(successor),
+                                " with ", pointerDump(block), "->", pointerDump(newSuccessor), "\n");
+                        }
+                        // Note that we do not do replacePredecessor() because the block we're
+                        // skipping will still have newSuccessor as its successor.
+                        newSuccessor->addPredecessor(block);
+                        successor = newSuccessor;
+                        changed = true;
+                    }
+                }
+            }
+
+            // Now check if the block's terminal can be replaced with a jump. The terminal must not
+            // have weird effects.
+            if (block->numSuccessors() > 1 
+                && !block->last().hasNonControlEffects()) {
+                // All of the successors must be the same.
+                bool allSame = true;
+                BasicBlock* firstSuccessor = block->successorBlock(0);
+                for (unsigned i = 1; i < block->numSuccessors(); ++i) {
+                    if (block->successorBlock(i) != firstSuccessor) {
+                        allSame = false;
+                        break;
+                    }
+                }
+                if (allSame) {
+                    if (verbose)
+                        dataLog("Changing ", pointerDump(block), "'s terminal to a Jump.\n");
+                    block->last() = Inst(Jump, block->last().origin);
+                    block->successors().resize(1);
+                    block->successors()[0].frequency() = FrequencyClass::Normal;
+                    changed = true;
+                }
+            }
+
+            // Finally handle jumps to a block with one predecessor.
+            if (block->numSuccessors() == 1
+                && !block->last().hasNonControlEffects()) {
+                BasicBlock* successor = block->successorBlock(0);
+                if (successor != block && successor->numPredecessors() == 1) {
+                    RELEASE_ASSERT(successor->predecessor(0) == block);
+
+                    // We can merge the two blocks because the predecessor only jumps to the successor
+                    // and the successor is only reachable from the predecessor.
+
+                    // Remove the terminal.
+                    Value* origin = block->insts().takeLast().origin;
+
+                    // Append the full contents of the successor to the predecessor.
+                    block->insts().reserveCapacity(block->size() + successor->size());
+                    for (Inst& inst : *successor)
+                        block->appendInst(WTFMove(inst));
+
+                    // Make sure that our successors are the successor's successors.
+                    block->successors() = WTFMove(successor->successors());
+
+                    // Make sure that the successor has nothing left in it except an oops.
+                    successor->resize(1);
+                    successor->last() = Inst(Oops, origin);
+                    successor->successors().clear();
+
+                    // Ensure that the predecessors of block's new successors know what's up.
+                    for (BasicBlock* newSuccessor : block->successorBlocks())
+                        newSuccessor->replacePredecessor(successor, block);
+
+                    if (verbose)
+                        dataLog("Merged ", pointerDump(block), "->", pointerDump(successor), "\n");
+                    changed = true;
+                }
+            }
+        }
+
+        if (!changed)
+            break;
+        result = true;
+        code.resetReachability();
+    }
+
+    return result;
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+
diff --git a/b3/air/AirSimplifyCFG.h b/b3/air/AirSimplifyCFG.h
new file mode 100644
index 0000000..7ac510d
--- /dev/null
+++ b/b3/air/AirSimplifyCFG.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Simplifies the control flow graph by removing jump-only blocks and merging jumps.
+
+bool simplifyCFG(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirSpecial.cpp b/b3/air/AirSpecial.cpp
new file mode 100644
index 0000000..e825767
--- /dev/null
+++ b/b3/air/AirSpecial.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirSpecial.h"
+
+#if ENABLE(B3_JIT)
+
+#include 
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+const char* const Special::dumpPrefix = "&";
+
+Special::Special()
+{
+}
+
+Special::~Special()
+{
+}
+
+CString Special::name() const
+{
+    StringPrintStream out;
+    dumpImpl(out);
+    return out.toCString();
+}
+
+std::optional<unsigned> Special::shouldTryAliasingDef(Inst&)
+{
+    return std::nullopt;
+}
+
+bool Special::isTerminal(Inst&)
+{
+    return false;
+}
+
+bool Special::hasNonArgEffects(Inst&)
+{
+    return true;
+}
+
+bool Special::hasNonArgNonControlEffects(Inst&)
+{
+    return true;
+}
+
+void Special::dump(PrintStream& out) const
+{
+    out.print(dumpPrefix);
+    dumpImpl(out);
+    if (m_index != UINT_MAX)
+        out.print(m_index);
+}
+
+void Special::deepDump(PrintStream& out) const
+{
+    out.print(*this, ": ");
+    deepDumpImpl(out);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirSpecial.h b/b3/air/AirSpecial.h
new file mode 100644
index 0000000..480cbfc
--- /dev/null
+++ b/b3/air/AirSpecial.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirInst.h"
+#include "B3SparseCollection.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+struct GenerationContext;
+
+class Special {
+    WTF_MAKE_NONCOPYABLE(Special);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    static const char* const dumpPrefix;
+    
+    Special();
+    virtual ~Special();
+
+    Code& code() const { return *m_code; }
+
+    CString name() const;
+
+    virtual void forEachArg(Inst&, const ScopedLambda<Inst::EachArgCallback>&) = 0;
+    virtual bool isValid(Inst&) = 0;
+    virtual bool admitsStack(Inst&, unsigned argIndex) = 0;
+    virtual std::optional<unsigned> shouldTryAliasingDef(Inst&);
+
+    // This gets called for each Inst that uses this Special. Note that there is no way to
+    // guarantee that a Special gets used from just one Inst, because Air might taildup late. So,
+    // if you want to pass this information down to generate(), then you have to do one of the
+    // following:
+    //
+    // 1) Generate Air that starts with a separate Special per Patch Inst, and then merge
+    //    usedRegister sets. This is probably not great, but it optimizes for the common case that
+    //    Air didn't duplicate code or that such duplication didn't cause any interesting changes to
+    //    register assignment.
+    //
+    // 2) Have the Special maintain a HashMap<Inst*, RegisterSet>. This works because the analysis
+    //    that feeds into this call is performed just before code generation and there is no way
+    //    for the Vector<>'s that contain the Insts to be reallocated. This allows generate() to
+    //    consult the HashMap.
+    //
+    // 3) Hybrid: you could use (1) and fire up a HashMap if you see multiple calls.
+    //
+    // Note that it's not possible to rely on reportUsedRegisters() being called in the same order
+    // as generate(). If we could rely on that, then we could just have each Special instance
+    // maintain a Vector of RegisterSet's and then process that vector in the right order in
+    // generate(). But, the ordering difference is unlikely to change since it would harm the
+    // performance of the liveness analysis.
+    //
+    // Currently, we do (1) for B3 stackmaps.
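+    //
+    // As a sketch of what option (2) could look like (the member name is hypothetical, not part
+    // of this interface):
+    //
+    //     HashMap<Inst*, RegisterSet> m_usedRegisters;
+    //     void reportUsedRegisters(Inst& inst, const RegisterSet& regs) override
+    //     {
+    //         m_usedRegisters.add(&inst, regs);
+    //     }
+    //
+    // generate() would then look its Inst back up in m_usedRegisters, relying on the Inst's
+    // address staying stable between this call and code generation, as noted above.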
+    virtual void reportUsedRegisters(Inst&, const RegisterSet&) = 0;
+    
+    virtual CCallHelpers::Jump generate(Inst&, CCallHelpers&, GenerationContext&) = 0;
+
+    virtual RegisterSet extraEarlyClobberedRegs(Inst&) = 0;
+    virtual RegisterSet extraClobberedRegs(Inst&) = 0;
+    
+    // By default, this returns false.
+    virtual bool isTerminal(Inst&);
+
+    // By default, this returns true.
+    virtual bool hasNonArgEffects(Inst&);
+
+    // By default, this returns true.
+    virtual bool hasNonArgNonControlEffects(Inst&);
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+protected:
+    virtual void dumpImpl(PrintStream&) const = 0;
+    virtual void deepDumpImpl(PrintStream&) const = 0;
+
+private:
+    friend class Code;
+    friend class SparseCollection<Special>;
+
+    unsigned m_index { UINT_MAX };
+    Code* m_code { nullptr };
+};
+
+class DeepSpecialDump {
+public:
+    DeepSpecialDump(const Special* special)
+        : m_special(special)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_special)
+            m_special->deepDump(out);
+        else
+            out.print("");
+    }
+
+private:
+    const Special* m_special;
+};
+
+inline DeepSpecialDump deepDump(const Special* special)
+{
+    return DeepSpecialDump(special);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirSpillEverything.cpp b/b3/air/AirSpillEverything.cpp
new file mode 100644
index 0000000..ebf3774
--- /dev/null
+++ b/b3/air/AirSpillEverything.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirSpillEverything.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirCode.h"
+#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
+#include "AirLiveness.h"
+#include "AirPadInterference.h"
+#include "AirPhaseScope.h"
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+void spillEverything(Code& code)
+{
+    PhaseScope phaseScope(code, "spillEverything");
+    
+    padInterference(code);
+
+    // We want to know the set of registers used at every point in every basic block.
+    IndexMap<BasicBlock, Vector<RegisterSet>> usedRegisters(code.size());
+    GPLiveness gpLiveness(code);
+    FPLiveness fpLiveness(code);
+    for (BasicBlock* block : code) {
+        GPLiveness::LocalCalc gpLocalCalc(gpLiveness, block);
+        FPLiveness::LocalCalc fpLocalCalc(fpLiveness, block);
+
+        usedRegisters[block].resize(block->size() + 1);
+
+        auto setUsedRegisters = [&] (unsigned index) {
+            RegisterSet& registerSet = usedRegisters[block][index];
+            for (Tmp tmp : gpLocalCalc.live()) {
+                if (tmp.isReg())
+                    registerSet.set(tmp.reg());
+            }
+            for (Tmp tmp : fpLocalCalc.live()) {
+                if (tmp.isReg())
+                    registerSet.set(tmp.reg());
+            }
+
+            // Gotta account for dead assignments to registers. These may happen because the input
+            // code is suboptimal.
+            Inst::forEachDefWithExtraClobberedRegs<Tmp>(
+                block->get(index - 1), block->get(index),
+                [&] (const Tmp& tmp, Arg::Role, Arg::Type, Arg::Width) {
+                    if (tmp.isReg())
+                        registerSet.set(tmp.reg());
+                });
+        };
+
+        for (unsigned instIndex = block->size(); instIndex--;) {
+            setUsedRegisters(instIndex + 1);
+            gpLocalCalc.execute(instIndex);
+            fpLocalCalc.execute(instIndex);
+        }
+        setUsedRegisters(0);
+    }
+
+    // Allocate a stack slot for each tmp.
+    Vector<StackSlot*> allStackSlots[Arg::numTypes];
+    for (unsigned typeIndex = 0; typeIndex < Arg::numTypes; ++typeIndex) {
+        Vector<StackSlot*>& stackSlots = allStackSlots[typeIndex];
+        Arg::Type type = static_cast<Arg::Type>(typeIndex);
+        stackSlots.resize(code.numTmps(type));
+        for (unsigned tmpIndex = code.numTmps(type); tmpIndex--;)
+            stackSlots[tmpIndex] = code.addStackSlot(8, StackSlotKind::Spill);
+    }
+
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+            RegisterSet& setBefore = usedRegisters[block][instIndex];
+            RegisterSet& setAfter = usedRegisters[block][instIndex + 1];
+            Inst& inst = block->at(instIndex);
+
+            // First try to spill directly.
+            for (unsigned i = 0; i < inst.args.size(); ++i) {
+                Arg& arg = inst.args[i];
+
+                if (arg.isTmp()) {
+                    if (arg.isReg())
+                        continue;
+
+                    if (inst.admitsStack(i)) {
+                        StackSlot* stackSlot = allStackSlots[arg.type()][arg.tmpIndex()];
+                        arg = Arg::stack(stackSlot);
+                        continue;
+                    }
+                }
+            }
+
+            // Now fall back on spilling using separate Move's to load/store the tmp.
+            inst.forEachTmp(
+                [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width) {
+                    if (tmp.isReg())
+                        return;
+                    
+                    StackSlot* stackSlot = allStackSlots[type][tmp.tmpIndex()];
+                    Arg arg = Arg::stack(stackSlot);
+
+                    // Need to figure out a register to use. How we do that depends on the role.
+                    Reg chosenReg;
+                    switch (role) {
+                    case Arg::Use:
+                    case Arg::ColdUse:
+                        for (Reg reg : code.regsInPriorityOrder(type)) {
+                            if (!setBefore.get(reg)) {
+                                setBefore.set(reg);
+                                chosenReg = reg;
+                                break;
+                            }
+                        }
+                        break;
+                    case Arg::Def:
+                    case Arg::ZDef:
+                        for (Reg reg : code.regsInPriorityOrder(type)) {
+                            if (!setAfter.get(reg)) {
+                                setAfter.set(reg);
+                                chosenReg = reg;
+                                break;
+                            }
+                        }
+                        break;
+                    case Arg::UseDef:
+                    case Arg::UseZDef:
+                    case Arg::LateUse:
+                    case Arg::LateColdUse:
+                    case Arg::Scratch:
+                    case Arg::EarlyDef:
+                        for (Reg reg : code.regsInPriorityOrder(type)) {
+                            if (!setBefore.get(reg) && !setAfter.get(reg)) {
+                                setAfter.set(reg);
+                                setBefore.set(reg);
+                                chosenReg = reg;
+                                break;
+                            }
+                        }
+                        break;
+                    case Arg::UseAddr:
+                        // We will never UseAddr a Tmp, that doesn't make sense.
+                        RELEASE_ASSERT_NOT_REACHED();
+                        break;
+                    }
+                    RELEASE_ASSERT(chosenReg);
+
+                    tmp = Tmp(chosenReg);
+
+                    Opcode move = type == Arg::GP ? Move : MoveDouble;
+
+                    if (Arg::isAnyUse(role) && role != Arg::Scratch)
+                        insertionSet.insert(instIndex, move, inst.origin, arg, tmp);
+                    if (Arg::isAnyDef(role))
+                        insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+                });
+        }
+        insertionSet.execute(block);
+    }
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirSpillEverything.h b/b3/air/AirSpillEverything.h
new file mode 100644
index 0000000..0fdca66
--- /dev/null
+++ b/b3/air/AirSpillEverything.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a phase for testing. It behaves like a register allocator in the sense that it
+// eliminates temporaries from the program. It accomplishes this by always spilling all
+// temporaries. The resulting code is going to be very inefficient. This phase is great if you
+// think that there is a bug in the register allocator. You can confirm this by running this
+// phase instead of the register allocator.
+//
+// Note that even though this phase does the cheapest thing possible, it's not even written in a
+// particularly efficient way. So, don't get any ideas about using this phase to reduce compiler
+// latency. If you wanted to do that, you should come up with a clever algorithm instead of using
+// this silly thing.
+
+void spillEverything(Code&);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirStackSlot.cpp b/b3/air/AirStackSlot.cpp
new file mode 100644
index 0000000..58cac06
--- /dev/null
+++ b/b3/air/AirStackSlot.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirStackSlot.h"
+
+#if ENABLE(B3_JIT)
+
+#include "B3StackSlot.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+void StackSlot::setOffsetFromFP(intptr_t value)
+{
+    m_offsetFromFP = value;
+    if (m_b3Slot)
+        m_b3Slot->m_offsetFromFP = value;
+}
+
+unsigned StackSlot::jsHash() const
+{
+    return static_cast<unsigned>(m_kind) + m_byteSize * 3 + m_offsetFromFP * 7;
+}
+
+void StackSlot::dump(PrintStream& out) const
+{
+    if (isSpill())
+        out.print("spill");
+    else
+        out.print("stack");
+    out.print(m_index);
+}
+
+void StackSlot::deepDump(PrintStream& out) const
+{
+    out.print("byteSize = ", m_byteSize, ", offsetFromFP = ", m_offsetFromFP, ", kind = ", m_kind);
+    if (m_b3Slot)
+        out.print(", b3Slot = ", *m_b3Slot, ": (", B3::deepDump(m_b3Slot), ")");
+}
+
+StackSlot::StackSlot(unsigned byteSize, StackSlotKind kind, B3::StackSlot* b3Slot)
+    : m_byteSize(byteSize)
+    , m_offsetFromFP(b3Slot ? b3Slot->offsetFromFP() : 0)
+    , m_kind(kind)
+    , m_b3Slot(b3Slot)
+{
+    ASSERT(byteSize);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirStackSlot.h b/b3/air/AirStackSlot.h
new file mode 100644
index 0000000..85c94ac
--- /dev/null
+++ b/b3/air/AirStackSlot.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirStackSlotKind.h"
+#include "B3SparseCollection.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC { namespace B3 {
+
+class StackSlot;
+
+namespace Air {
+
+class StackSlot {
+    WTF_MAKE_NONCOPYABLE(StackSlot);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    unsigned byteSize() const { return m_byteSize; }
+    StackSlotKind kind() const { return m_kind; }
+    bool isLocked() const { return m_kind == StackSlotKind::Locked; }
+    bool isSpill() const { return m_kind == StackSlotKind::Spill; }
+    unsigned index() const { return m_index; }
+
+    void ensureSize(unsigned requestedSize)
+    {
+        ASSERT(!m_offsetFromFP);
+        m_byteSize = std::max(m_byteSize, requestedSize);
+    }
+
+    unsigned alignment() const
+    {
+        if (byteSize() <= 1)
+            return 1;
+        if (byteSize() <= 2)
+            return 2;
+        if (byteSize() <= 4)
+            return 4;
+        return 8;
+    }
+
+    B3::StackSlot* b3Slot() const { return m_b3Slot; }
+
+    // Zero means that it's not yet assigned.
+    intptr_t offsetFromFP() const { return m_offsetFromFP; }
+
+    // This should usually just be called from phases that do stack allocation. But you can
+    // totally force a stack slot to land at some offset.
+    void setOffsetFromFP(intptr_t);
+    
+    // This computes a hash for comparing this to JSAir's StackSlot.
+    unsigned jsHash() const;
+
+    void dump(PrintStream&) const;
+    void deepDump(PrintStream&) const;
+
+private:
+    friend class Code;
+    friend class SparseCollection<StackSlot>;
+
+    StackSlot(unsigned byteSize, StackSlotKind, B3::StackSlot*);
+    
+    unsigned m_byteSize { 0 };
+    unsigned m_index { UINT_MAX };
+    intptr_t m_offsetFromFP { 0 };
+    StackSlotKind m_kind { StackSlotKind::Locked };
+    B3::StackSlot* m_b3Slot { nullptr };
+};
+
+class DeepStackSlotDump {
+public:
+    DeepStackSlotDump(const StackSlot* slot)
+        : m_slot(slot)
+    {
+    }
+
+    void dump(PrintStream& out) const
+    {
+        if (m_slot)
+            m_slot->deepDump(out);
+        else
+            out.print("");
+    }
+
+private:
+    const StackSlot* m_slot;
+};
+
+inline DeepStackSlotDump deepDump(const StackSlot* slot)
+{
+    return DeepStackSlotDump(slot);
+}
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+inline void printInternal(PrintStream& out, JSC::B3::Air::StackSlot* stackSlot)
+{
+    out.print(pointerDump(stackSlot));
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirStackSlotKind.cpp b/b3/air/AirStackSlotKind.cpp
new file mode 100644
index 0000000..af83de1
--- /dev/null
+++ b/b3/air/AirStackSlotKind.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirStackSlotKind.h"
+
+#if ENABLE(B3_JIT)
+
+#include 
+
+namespace WTF {
+
+using namespace JSC::B3::Air;
+
+void printInternal(PrintStream& out, StackSlotKind kind)
+{
+    switch (kind) {
+    case StackSlotKind::Locked:
+        out.print("Locked");
+        return;
+    case StackSlotKind::Spill:
+        out.print("Spill");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirStackSlotKind.h b/b3/air/AirStackSlotKind.h
new file mode 100644
index 0000000..9ef2057
--- /dev/null
+++ b/b3/air/AirStackSlotKind.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+enum class StackSlotKind : uint8_t {
+    // A locked stack slot is an area of stack requested by the client. It cannot be killed. The
+    // client can get its FP offset and write to it from stack walking code, so we must assume
+    // that reads and writes to a locked stack slot can be clobbered the same way as reads and
+    // writes to any memory location.
+    Locked,
+
+    // A spill slot. These have fundamentally different behavior than a typical memory location.
+    // Temporaries are lowered to them. This means, for example, that a 32-bit ZDef store to an
+    // 8-byte stack slot will zero the top 4 bytes, even though a 32-bit ZDef store to any other
+    // kind of memory location would do no such thing. UseAddr on a spill slot is not allowed, so
+    // they never escape.
+    Spill
+
+    // FIXME: We should add a third mode, which means that the stack slot will be read asynchronously
+    // as with Locked, but never written to asynchronously. Then, Air could optimize spilling and
+    // filling by tracking whether the value had been stored to a read-only locked slot. If it had,
+    // then we can refill from that slot.
+    // https://bugs.webkit.org/show_bug.cgi?id=150587
+};
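+
+// As a concrete (purely illustrative) consequence of the above: a 32-bit ZDef store into an
+// 8-byte Spill slot may be assumed to zero the slot's upper 4 bytes, whereas the same store into
+// a Locked slot, like a store to any other memory, must be assumed to leave those bytes unchanged.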
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::B3::Air::StackSlotKind);
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirTmp.cpp b/b3/air/AirTmp.cpp
new file mode 100644
index 0000000..487f521
--- /dev/null
+++ b/b3/air/AirTmp.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirTmp.h"
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+void Tmp::dump(PrintStream& out) const
+{
+    if (!*this) {
+        out.print("");
+        return;
+    }
+
+    if (isReg()) {
+        out.print(reg());
+        return;
+    }
+
+    if (isGP()) {
+        out.print("%tmp", gpTmpIndex());
+        return;
+    }
+
+    out.print("%ftmp", fpTmpIndex());
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirTmp.h b/b3/air/AirTmp.h
new file mode 100644
index 0000000..c01427c
--- /dev/null
+++ b/b3/air/AirTmp.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "Reg.h"
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Arg;
+
+// A Tmp is a generalization of a register. It can be used to refer to any GPR or FPR. It can also
+// be used to refer to an unallocated register (i.e. a temporary). Like many Air classes, we use
+// deliberately terse naming since we will have to use this name a lot.
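+//
+// For example (illustrative, not exercised in this file): Tmp(GPRInfo::regT0) names a concrete
+// machine register, while Code::newTmp(Arg::GP) hands out a fresh unallocated GP temporary; both
+// flow through Args the same way until register allocation pins temporaries to registers.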
+
+class Tmp {
+public:
+    Tmp()
+        : m_value(0)
+    {
+    }
+
+    explicit Tmp(Reg reg)
+    {
+        if (reg) {
+            if (reg.isGPR())
+                m_value = encodeGPR(reg.gpr());
+            else
+                m_value = encodeFPR(reg.fpr());
+        } else
+            m_value = 0;
+    }
+
+    explicit Tmp(const Arg&);
+
+    static Tmp gpTmpForIndex(unsigned index)
+    {
+        Tmp result;
+        result.m_value = encodeGPTmp(index);
+        return result;
+    }
+
+    static Tmp fpTmpForIndex(unsigned index)
+    {
+        Tmp result;
+        result.m_value = encodeFPTmp(index);
+        return result;
+    }
+
+    explicit operator bool() const { return !!m_value; }
+
+    bool isGP() const
+    {
+        return isEncodedGP(m_value);
+    }
+
+    bool isFP() const
+    {
+        return isEncodedFP(m_value);
+    }
+
+    bool isGPR() const
+    {
+        return isEncodedGPR(m_value);
+    }
+
+    bool isFPR() const
+    {
+        return isEncodedFPR(m_value);
+    }
+
+    bool isReg() const
+    {
+        return isGPR() || isFPR();
+    }
+
+    GPRReg gpr() const
+    {
+        return decodeGPR(m_value);
+    }
+
+    FPRReg fpr() const
+    {
+        return decodeFPR(m_value);
+    }
+
+    Reg reg() const
+    {
+        if (isGP())
+            return gpr();
+        return fpr();
+    }
+
+    bool hasTmpIndex() const
+    {
+        return !isReg();
+    }
+
+    unsigned gpTmpIndex() const
+    {
+        return decodeGPTmp(m_value);
+    }
+
+    unsigned fpTmpIndex() const
+    {
+        return decodeFPTmp(m_value);
+    }
+
+    unsigned tmpIndex() const
+    {
+        if (isGP())
+            return gpTmpIndex();
+        return fpTmpIndex();
+    }
+
+    bool isAlive() const
+    {
+        return !!*this;
+    }
+
+    bool operator==(const Tmp& other) const
+    {
+        return m_value == other.m_value;
+    }
+
+    bool operator!=(const Tmp& other) const
+    {
+        return !(*this == other);
+    }
+
+    void dump(PrintStream& out) const;
+
+    Tmp(WTF::HashTableDeletedValueType)
+        : m_value(std::numeric_limits<int>::max())
+    {
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return *this == Tmp(WTF::HashTableDeletedValue);
+    }
+
+    unsigned hash() const
+    {
+        return WTF::IntHash<int>::hash(m_value);
+    }
+
+    unsigned internalValue() const { return static_cast<unsigned>(m_value); }
+
+    static Tmp tmpForInternalValue(unsigned index)
+    {
+        Tmp result;
+        result.m_value = static_cast<int>(index);
+        return result;
+    }
+
+private:
+    static int encodeGP(unsigned index)
+    {
+        return 1 + index;
+    }
+
+    static int encodeFP(unsigned index)
+    {
+        return -1 - index;
+    }
+
+    static int encodeGPR(GPRReg gpr)
+    {
+        return encodeGP(gpr - MacroAssembler::firstRegister());
+    }
+
+    static int encodeFPR(FPRReg fpr)
+    {
+        return encodeFP(fpr - MacroAssembler::firstFPRegister());
+    }
+
+    static int encodeGPTmp(unsigned index)
+    {
+        return encodeGPR(MacroAssembler::lastRegister()) + 1 + index;
+    }
+
+    static int encodeFPTmp(unsigned index)
+    {
+        return encodeFPR(MacroAssembler::lastFPRegister()) - 1 - index;
+    }
+
+    static bool isEncodedGP(int value)
+    {
+        return value > 0;
+    }
+
+    static bool isEncodedFP(int value)
+    {
+        return value < 0;
+    }
+
+    static bool isEncodedGPR(int value)
+    {
+        return isEncodedGP(value) && value <= encodeGPR(MacroAssembler::lastRegister());
+    }
+
+    static bool isEncodedFPR(int value)
+    {
+        return isEncodedFP(value) && value >= encodeFPR(MacroAssembler::lastFPRegister());
+    }
+
+    static bool isEncodedGPTmp(int value)
+    {
+        return isEncodedGP(value) && !isEncodedGPR(value);
+    }
+
+    static bool isEncodedFPTmp(int value)
+    {
+        return isEncodedFP(value) && !isEncodedFPR(value);
+    }
+
+    static GPRReg decodeGPR(int value)
+    {
+        ASSERT(isEncodedGPR(value));
+        return static_cast<GPRReg>(
+            (value - encodeGPR(MacroAssembler::firstRegister())) + MacroAssembler::firstRegister());
+    }
+
+    static FPRReg decodeFPR(int value)
+    {
+        ASSERT(isEncodedFPR(value));
+        return static_cast<FPRReg>(
+            (encodeFPR(MacroAssembler::firstFPRegister()) - value) +
+            MacroAssembler::firstFPRegister());
+    }
+
+    static unsigned decodeGPTmp(int value)
+    {
+        ASSERT(isEncodedGPTmp(value));
+        return value - (encodeGPR(MacroAssembler::lastRegister()) + 1);
+    }
+
+    static unsigned decodeFPTmp(int value)
+    {
+        ASSERT(isEncodedFPTmp(value));
+        return (encodeFPR(MacroAssembler::lastFPRegister()) - 1) - value;
+    }
+
+    // 0: empty Tmp
+    // positive: GPRs and then GP temps.
+    // negative: FPRs and then FP temps.
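+    //
+    // For example (the counts are illustrative): with 16 machine GPRs, values 1..16 encode the
+    // GPRs and 17, 18, ... encode GP temps by index; symmetrically, the FPRs get -1, -2, ... and
+    // FP temps continue below them. The real bounds come from MacroAssembler's first/last
+    // register constants used by the encode/decode helpers above.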
+    int m_value;
+};
+
+struct TmpHash {
+    static unsigned hash(const Tmp& key) { return key.hash(); }
+    static bool equal(const Tmp& a, const Tmp& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } } // namespace JSC::B3::Air
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::B3::Air::Tmp> {
+    typedef JSC::B3::Air::TmpHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::B3::Air::Tmp> : SimpleClassHashTraits<JSC::B3::Air::Tmp> { };
+
+} // namespace WTF
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirTmpInlines.h b/b3/air/AirTmpInlines.h
new file mode 100644
index 0000000..a7de098
--- /dev/null
+++ b/b3/air/AirTmpInlines.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+#include "AirTmp.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+inline Tmp::Tmp(const Arg& arg)
+{
+    *this = arg.tmp();
+}
+
+// When a Hash structure is too slow or when Sets contain most values, you can
+// use direct array addressing with Tmps.
+template<Arg::Type type>
+struct AbsoluteTmpMapper;
+
+template<>
+struct AbsoluteTmpMapper<Arg::GP> {
+    static unsigned absoluteIndex(const Tmp& tmp)
+    {
+        ASSERT(tmp.isGP());
+        ASSERT(static_cast<int>(tmp.internalValue()) > 0);
+        return tmp.internalValue();
+    }
+
+    static unsigned absoluteIndex(unsigned tmpIndex)
+    {
+        return absoluteIndex(Tmp::gpTmpForIndex(tmpIndex));
+    }
+
+    static unsigned lastMachineRegisterIndex()
+    {
+        return absoluteIndex(Tmp(MacroAssembler::lastRegister()));
+    }
+
+    static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+    {
+        return Tmp::tmpForInternalValue(tmpIndex);
+    }
+};
+
+template<>
+struct AbsoluteTmpMapper<Arg::FP> {
+    static unsigned absoluteIndex(const Tmp& tmp)
+    {
+        ASSERT(tmp.isFP());
+        ASSERT(static_cast<int>(tmp.internalValue()) < 0);
+        return -tmp.internalValue();
+    }
+
+    static unsigned absoluteIndex(unsigned tmpIndex)
+    {
+        return absoluteIndex(Tmp::fpTmpForIndex(tmpIndex));
+    }
+
+    static unsigned lastMachineRegisterIndex()
+    {
+        return absoluteIndex(Tmp(MacroAssembler::lastFPRegister()));
+    }
+
+    static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+    {
+        return Tmp::tmpForInternalValue(-tmpIndex);
+    }
+};
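+
+// A sketch of the intended usage pattern (PerTmpState is a placeholder name, not part of Air): a
+// phase that wants dense per-Tmp data for one bank can size a Vector using the absolute index of
+// the one-past-the-end temp and then index it directly, instead of keying a HashMap by Tmp:
+//
+//     Vector<PerTmpState> state(AbsoluteTmpMapper<Arg::GP>::absoluteIndex(code.numTmps(Arg::GP)));
+//     state[AbsoluteTmpMapper<Arg::GP>::absoluteIndex(tmp)] = ...;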
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirTmpWidth.cpp b/b3/air/AirTmpWidth.cpp
new file mode 100644
index 0000000..f1173c0
--- /dev/null
+++ b/b3/air/AirTmpWidth.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AirTmpWidth.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+TmpWidth::TmpWidth()
+{
+}
+
+TmpWidth::TmpWidth(Code& code)
+{
+    recompute(code);
+}
+
+TmpWidth::~TmpWidth()
+{
+}
+
+void TmpWidth::recompute(Code& code)
+{
+    // Set this to true to cause this analysis to always return pessimistic results.
+    const bool beCareful = false;
+
+    const bool verbose = false;
+
+    if (verbose) {
+        dataLog("Code before TmpWidth:\n");
+        dataLog(code);
+    }
+    
+    m_width.clear();
+    
+    auto assumeTheWorst = [&] (Tmp tmp) {
+        Widths& widths = m_width.add(tmp, Widths()).iterator->value;
+        Arg::Type type = Arg(tmp).type();
+        widths.use = Arg::conservativeWidth(type);
+        widths.def = Arg::conservativeWidth(type);
+    };
+    
+    // Assume the worst for registers.
+    RegisterSet::allRegisters().forEach(
+        [&] (Reg reg) {
+            assumeTheWorst(Tmp(reg));
+        });
+
+    if (beCareful) {
+        code.forAllTmps(assumeTheWorst);
+        
+        // We fall through because the fixpoint that follows can only make things even more
+        // conservative. This mode isn't meant to be fast, just safe.
+    }
+
+    // Now really analyze everything but Move's over Tmp's, but set aside those Move's so we can find
+    // them quickly during the fixpoint below. Note that we can make this analysis stronger by
+    // recognizing more kinds of Move's or anything that has Move-like behavior, though it's probably not
+    // worth it.
+    Vector<Inst*> moves;
+    for (BasicBlock* block : code) {
+        for (Inst& inst : *block) {
+            if (inst.kind.opcode == Move && inst.args[1].isTmp()) {
+                if (inst.args[0].isTmp()) {
+                    // Make sure that both sides of the Move have a width already initialized. The
+                    // fixpoint below assumes that it never has to add things to the HashMap.
+                    m_width.add(inst.args[0].tmp(), Widths(Arg::GP));
+                    m_width.add(inst.args[1].tmp(), Widths(Arg::GP));
+                    
+                    moves.append(&inst);
+                    continue;
+                }
+                if (inst.args[0].isImm()
+                    && inst.args[0].value() >= 0) {
+                    Tmp tmp = inst.args[1].tmp();
+                    Widths& widths = m_width.add(tmp, Widths(Arg::GP)).iterator->value;
+                    
+                    if (inst.args[0].value() <= std::numeric_limits<uint8_t>::max())
+                        widths.def = std::max(widths.def, Arg::Width8);
+                    else if (inst.args[0].value() <= std::numeric_limits<uint16_t>::max())
+                        widths.def = std::max(widths.def, Arg::Width16);
+                    else if (inst.args[0].value() <= std::numeric_limits<uint32_t>::max())
+                        widths.def = std::max(widths.def, Arg::Width32);
+                    else
+                        widths.def = std::max(widths.def, Arg::Width64);
+
+                    continue;
+                }
+            }
+            inst.forEachTmp(
+                [&] (Tmp& tmp, Arg::Role role, Arg::Type type, Arg::Width width) {
+                    Widths& widths = m_width.add(tmp, Widths(type)).iterator->value;
+                    
+                    if (Arg::isAnyUse(role))
+                        widths.use = std::max(widths.use, width);
+
+                    if (Arg::isZDef(role))
+                        widths.def = std::max(widths.def, width);
+                    else if (Arg::isAnyDef(role))
+                        widths.def = Arg::conservativeWidth(type);
+                });
+        }
+    }
+
+    // Finally, fixpoint over the Move's.
+    bool changed = true;
+    while (changed) {
+        changed = false;
+        for (Inst* move : moves) {
+            ASSERT(move->kind.opcode == Move);
+            ASSERT(move->args[0].isTmp());
+            ASSERT(move->args[1].isTmp());
+
+            // We already ensure that both tmps are added to the width map. That's important
+            // because you cannot add both tmps here while simultaneously getting a reference to
+            // their values, since the second add would invalidate the reference returned by the
+            // first one.
+            Widths& srcWidths = m_width.find(move->args[0].tmp())->value;
+            Widths& dstWidths = m_width.find(move->args[1].tmp())->value;
+
+            // Legend:
+            //
+            //     Move %src, %dst
+
+            // defWidth(%dst) is a promise about how many high bits are zero. The smaller the width, the
+            // stronger the promise. This Move may weaken that promise if we know that %src is making a
+            // weaker promise. Such forward flow is the only thing that determines defWidth().
+            if (dstWidths.def < srcWidths.def) {
+                dstWidths.def = srcWidths.def;
+                changed = true;
+            }
+
+            // srcWidth(%src) is a promise about how many high bits are ignored. The smaller the width,
+            // the stronger the promise. This Move may weaken that promise if we know that %dst is making
+            // a weaker promise. Such backward flow is the only thing that determines srcWidth().
+            if (srcWidths.use < dstWidths.use) {
+                srcWidths.use = dstWidths.use;
+                changed = true;
+            }
+        }
+    }
+
+    if (verbose)
+        dataLog("width: ", mapDump(m_width), "\n");
+}
+
+void TmpWidth::Widths::dump(PrintStream& out) const
+{
+    out.print("{use = ", use, ", def = ", def, "}");
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirTmpWidth.h b/b3/air/AirTmpWidth.h
new file mode 100644
index 0000000..ea612b6
--- /dev/null
+++ b/b3/air/AirTmpWidth.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArg.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+class TmpWidth {
+public:
+    TmpWidth();
+    TmpWidth(Code&);
+    ~TmpWidth();
+
+    void recompute(Code&);
+
+    // The width of a Tmp is the number of bits that you need to be able to track without some trivial
+    // recovery. A Tmp may have a "subwidth" (say, Width32 on a 64-bit system) if either of the following
+    // is true:
+    //
+    // - The high bits are never read.
+    // - The high bits are always zero.
+    //
+    // This doesn't tell you which of those properties holds, but you can query that using the other
+    // methods.
+    Arg::Width width(Tmp tmp) const
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return std::min(iter->value.use, iter->value.def);
+    }
+
+    // Return the minimum required width for all defs/uses of this Tmp.
+    Arg::Width requiredWidth(Tmp tmp)
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return std::max(iter->value.use, iter->value.def);
+    }
+
+    // This indirectly tells you how many of the tmp's high bits are guaranteed to be zero. The number
+    // of high bits that are zero is:
+    //
+    //     TotalBits - defWidth(tmp)
+    //
+    // where TotalBits is the total number of bits in the register, so 64 on a 64-bit system.
+    Arg::Width defWidth(Tmp tmp) const
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return iter->value.def;
+    }
+
+    // This tells you how much of the Tmp is going to be read.
+    Arg::Width useWidth(Tmp tmp) const
+    {
+        auto iter = m_width.find(tmp);
+        if (iter == m_width.end())
+            return Arg::minimumWidth(Arg(tmp).type());
+        return iter->value.use;
+    }
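+
+    // Hedged usage sketch (the names tmpWidth and tmp are illustrative, not part of this class):
+    // a phase that wants to narrow an operation on a 64-bit system might ask
+    //
+    //     if (tmpWidth.useWidth(tmp) <= Arg::Width32)
+    //         ...; // nothing ever reads the high 32 bits of tmp
+    //     if (tmpWidth.defWidth(tmp) <= Arg::Width32)
+    //         ...; // the high 32 bits of tmp are already known to be zero
+    //
+    // while width() combines both promises and requiredWidth() is the pessimistic bound.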
+    
+private:
+    struct Widths {
+        Widths() { }
+
+        Widths(Arg::Type type)
+        {
+            use = Arg::minimumWidth(type);
+            def = Arg::minimumWidth(type);
+        }
+
+        void dump(PrintStream& out) const;
+        
+        Arg::Width use;
+        Arg::Width def;
+    };
+    
+    HashMap<Tmp, Widths> m_width;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirUseCounts.h b/b3/air/AirUseCounts.h
new file mode 100644
index 0000000..98a7493
--- /dev/null
+++ b/b3/air/AirUseCounts.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+#include "AirArgInlines.h"
+#include "AirBlockWorklist.h"
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include 
+#include 
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// Computes the number of uses of a variable based on frequency of execution. The frequency of blocks
+// that are only reachable by rare edges is scaled by Options::rareBlockPenalty().
+
+// Thing can be either Tmp or Arg.
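+//
+// Hedged usage sketch (the register allocator is the natural client; the names below are
+// illustrative only):
+//
+//     UseCounts<Tmp> useCounts(code);
+//     const UseCounts<Tmp>::Counts* counts = useCounts[tmp];
+//     double spillCost = counts ? counts->numWarmUses + counts->numDefs : 0;
+//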
+template<typename Thing>
+class UseCounts {
+public:
+    struct Counts {
+        void dump(PrintStream& out) const
+        {
+            out.print(
+                "{numWarmUses = ", numWarmUses, ", numColdUses = ", numColdUses, ", numDefs = ",
+                numDefs, "}");
+        }
+        
+        double numWarmUses { 0 };
+        double numColdUses { 0 };
+        double numDefs { 0 };
+        double numConstDefs { 0 };
+    };
+    
+    UseCounts(Code& code)
+    {
+        // Find non-rare blocks.
+        BlockWorklist fastWorklist;
+        fastWorklist.push(code[0]);
+        while (BasicBlock* block = fastWorklist.pop()) {
+            for (FrequentedBlock& successor : block->successors()) {
+                if (!successor.isRare())
+                    fastWorklist.push(successor.block());
+            }
+        }
+        
+        for (BasicBlock* block : code) {
+            double frequency = block->frequency();
+            if (!fastWorklist.saw(block))
+                frequency *= Options::rareBlockPenalty();
+            for (Inst& inst : *block) {
+                inst.forEach(
+                    [&] (Thing& arg, Arg::Role role, Arg::Type, Arg::Width) {
+                        Counts& counts = m_counts.add(arg, Counts()).iterator->value;
+
+                        if (Arg::isWarmUse(role))
+                            counts.numWarmUses += frequency;
+                        if (Arg::isColdUse(role))
+                            counts.numColdUses += frequency;
+                        if (Arg::isAnyDef(role))
+                            counts.numDefs += frequency;
+                    });
+
+                if ((inst.kind.opcode == Move || inst.kind.opcode == Move32)
+                    && inst.args[0].isSomeImm()
+                    && inst.args[1].is<Thing>())
+                    m_counts.add(inst.args[1].as<Thing>(), Counts()).iterator->value.numConstDefs++;
+            }
+        }
+    }
+
+    const Counts* operator[](const Thing& arg) const
+    {
+        auto iter = m_counts.find(arg);
+        if (iter == m_counts.end())
+            return nullptr;
+        return &iter->value;
+    }
+
+    void dump(PrintStream& out) const
+    {
+        out.print(mapDump(m_counts));
+    }
+
+private:
+    HashMap<Thing, Counts> m_counts;
+};
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/AirValidate.cpp b/b3/air/AirValidate.cpp
new file mode 100644
index 0000000..d90de62
--- /dev/null
+++ b/b3/air/AirValidate.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "AirValidate.h"
+
+#if ENABLE(B3_JIT)
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "B3Procedure.h"
+
+namespace JSC { namespace B3 { namespace Air {
+
+namespace {
+
+class Validater {
+public:
+    Validater(Code& code, const char* dumpBefore)
+        : m_code(code)
+        , m_dumpBefore(dumpBefore)
+    {
+    }
+
+#define VALIDATE(condition, message) do {                               \
+        if (condition)                                                  \
+            break;                                                      \
+        fail(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #condition, toCString message); \
+    } while (false)
+    
+    void run()
+    {
+        HashSet<StackSlot*> validSlots;
+        HashSet<BasicBlock*> validBlocks;
+        HashSet<Special*> validSpecials;
+        
+        for (BasicBlock* block : m_code)
+            validBlocks.add(block);
+        for (StackSlot* slot : m_code.stackSlots())
+            validSlots.add(slot);
+        for (Special* special : m_code.specials())
+            validSpecials.add(special);
+
+        for (BasicBlock* block : m_code) {
+            // Blocks that are entrypoints must not have predecessors.
+            if (m_code.isEntrypoint(block))
+                VALIDATE(!block->numPredecessors(), ("At entrypoint ", *block));
+            
+            for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
+                Inst& inst = block->at(instIndex);
+                for (Arg& arg : inst.args) {
+                    switch (arg.kind()) {
+                    case Arg::Stack:
+                        VALIDATE(validSlots.contains(arg.stackSlot()), ("At ", inst, " in ", *block));
+                        break;
+                    case Arg::Special:
+                        VALIDATE(validSpecials.contains(arg.special()), ("At ", inst, " in ", *block));
+                        break;
+                    default:
+                        break;
+                    }
+                }
+                VALIDATE(inst.isValidForm(), ("At ", inst, " in ", *block));
+                if (instIndex == block->size() - 1)
+                    VALIDATE(inst.isTerminal(), ("At ", inst, " in ", *block));
+                else
+                    VALIDATE(!inst.isTerminal(), ("At ", inst, " in ", *block));
+
+                // forEachArg must return Arg&'s that point into the args array.
+                inst.forEachArg(
+                    [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
+                        VALIDATE(&arg >= &inst.args[0], ("At ", arg, " in ", inst, " in ", *block));
+                        VALIDATE(&arg <= &inst.args.last(), ("At ", arg, " in ", inst, " in ", *block));
+                    });
+                
+                switch (inst.kind.opcode) {
+                case EntrySwitch:
+                    VALIDATE(block->numSuccessors() == m_code.proc().numEntrypoints(), ("At ", inst, " in ", *block));
+                    break;
+                case Shuffle:
+                    // We can't handle trapping shuffles because of how we lower them. That could
+                    // be fixed though.
+                    VALIDATE(!inst.kind.traps, ("At ", inst, " in ", *block));
+                    break;
+                default:
+                    break;
+                }
+            }
+            for (BasicBlock* successor : block->successorBlocks())
+                VALIDATE(validBlocks.contains(successor), ("In ", *block));
+        }
+    }
+
+private:
+    NO_RETURN_DUE_TO_CRASH void fail(
+        const char* filename, int lineNumber, const char* function, const char* condition,
+        CString message)
+    {
+        CString failureMessage;
+        {
+            StringPrintStream out;
+            out.print("AIR VALIDATION FAILURE\n");
+            out.print("    ", condition, " (", filename, ":", lineNumber, ")\n");
+            out.print("    ", message, "\n");
+            out.print("    After ", m_code.lastPhaseName(), "\n");
+            failureMessage = out.toCString();
+        }
+
+        dataLog(failureMessage);
+        if (m_dumpBefore) {
+            dataLog("Before ", m_code.lastPhaseName(), ":\n");
+            dataLog(m_dumpBefore);
+        }
+        dataLog("At time of failure:\n");
+        dataLog(m_code);
+
+        dataLog(failureMessage);
+        WTFReportAssertionFailure(filename, lineNumber, function, condition);
+        CRASH();
+    }
+    
+    Code& m_code;
+    const char* m_dumpBefore;
+};
+
+} // anonymous namespace
+
+void validate(Code& code, const char* dumpBefore)
+{
+    Validater validater(code, dumpBefore);
+    validater.run();
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
diff --git a/b3/air/AirValidate.h b/b3/air/AirValidate.h
new file mode 100644
index 0000000..472c763
--- /dev/null
+++ b/b3/air/AirValidate.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+JS_EXPORT_PRIVATE void validate(Code&, const char* dumpBefore = nullptr);
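+
+// Hedged usage sketch (runSomePhase and before are illustrative names; the Air phase driver is the
+// expected caller):
+//
+//     CString before = toCString(code);
+//     runSomePhase(code);
+//     validate(code, before.data());
+//
+// On failure the validater prints the offending condition, the pre-phase dump when one is supplied,
+// and the current state of the code, then crashes.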
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
diff --git a/b3/air/opcode_generator.rb b/b3/air/opcode_generator.rb
new file mode 100644
index 0000000..d142405
--- /dev/null
+++ b/b3/air/opcode_generator.rb
@@ -0,0 +1,1228 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "pathname"
+
+class Opcode
+    attr_reader :name, :custom, :overloads
+    attr_reader :attributes
+
+    def initialize(name, custom)
+        @name = name
+        @custom = custom
+        @attributes = {}
+        unless custom
+            @overloads = []
+        end
+    end
+
+    def masmName
+        name[0].downcase + name[1..-1]
+    end
+end
+
+class Arg
+    attr_reader :role, :type, :width
+
+    def initialize(role, type, width)
+        @role = role
+        @type = type
+        @width = width
+    end
+
+    def widthCode
+        if width == "Ptr"
+            "Arg::pointerWidth()"
+        else
+            "Arg::Width#{width}"
+        end
+    end
+end
+
+class Overload
+    attr_reader :signature, :forms
+
+    def initialize(signature, forms)
+        @signature = signature
+        @forms = forms
+    end
+end
+
+class Kind
+    attr_reader :name
+    attr_accessor :custom
+
+    def initialize(name)
+        @name = name
+        @custom = false
+    end
+
+    def ==(other)
+        if other.is_a? String
+            @name == other
+        else
+            @name == other.name and @custom == other.custom
+        end
+    end
+
+    def Kind.argKinds(kind)
+        if kind == "Addr"
+            ["Addr", "Stack", "CallArg"]
+        else
+            [kind]
+        end
+    end
+
+    def argKinds
+        Kind.argKinds(name)
+    end
+end
+
+class Form
+    attr_reader :kinds, :altName, :archs
+
+    def initialize(kinds, altName, archs)
+        @kinds = kinds
+        @altName = altName
+        @archs = archs
+    end
+end
+
+class Origin
+    attr_reader :fileName, :lineNumber
+    
+    def initialize(fileName, lineNumber)
+        @fileName = fileName
+        @lineNumber = lineNumber
+    end
+    
+    def to_s
+        "#{fileName}:#{lineNumber}"
+    end
+end
+
+class Token
+    attr_reader :origin, :string
+    
+    def initialize(origin, string)
+        @origin = origin
+        @string = string
+    end
+    
+    def ==(other)
+        if other.is_a? Token
+            @string == other.string
+        else
+            @string == other
+        end
+    end
+    
+    def =~(other)
+        @string =~ other
+    end
+    
+    def to_s
+        "#{@string.inspect} at #{origin}"
+    end
+    
+    def parseError(*comment)
+        if comment.empty?
+            raise "Parse error: #{to_s}"
+        else
+            raise "Parse error: #{to_s}: #{comment[0]}"
+        end
+    end
+end
+
+def lex(str, fileName)
+    fileName = Pathname.new(fileName)
+    result = []
+    lineNumber = 1
+    while not str.empty?
+        case str
+        when /\A\#([^\n]*)/
+            # comment, ignore
+        when /\A\n/
+            # newline, ignore
+            lineNumber += 1
+        when /\A([a-zA-Z0-9_]+)/
+            result << Token.new(Origin.new(fileName, lineNumber), $&)
+        when /\A([ \t\r]+)/
+            # whitespace, ignore
+        when /\A[,:*\/]/
+            result << Token.new(Origin.new(fileName, lineNumber), $&)
+        else
+            raise "Lexer error at #{Origin.new(fileName, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}"
+        end
+        str = $~.post_match
+    end
+    result
+end
+
+def isRole(token)
+    token =~ /\A((U)|(D)|(UD)|(ZD)|(UZD)|(UA)|(S))\Z/
+end
+
+def isGF(token)
+    token =~ /\A((G)|(F))\Z/
+end
+
+def isKind(token)
+    token =~ /\A((Tmp)|(Imm)|(BigImm)|(BitImm)|(BitImm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
+end
+
+def isArch(token)
+    token =~ /\A((x86)|(x86_32)|(x86_64)|(arm)|(armv7)|(arm64)|(32)|(64))\Z/
+end
+
+def isWidth(token)
+    token =~ /\A((8)|(16)|(32)|(64)|(Ptr))\Z/
+end
+
+def isKeyword(token)
+    isRole(token) or isGF(token) or isKind(token) or isArch(token) or isWidth(token) or
+        token == "custom" or token == "as"
+end
+
+def isIdentifier(token)
+    token =~ /\A([a-zA-Z0-9_]+)\Z/ and not isKeyword(token)
+end
+
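+# Hedged example of the surface syntax the lexer and parser above accept. The opcodes, forms, and
+# "as" name below are illustrative and are not claimed to appear in AirOpcode.opcodes:
+#
+#     Add32 U:G:32, UD:G:32
+#         Tmp, Tmp
+#         Imm, Addr
+#
+#     x86: LoadFoo64 U:G:64, ZD:G:64 /effects
+#         64: Addr, Tmp as loadFooImpl
+#
+#     custom Patch
+#
+# An optional arch list before an opcode or a form restricts it to those architectures, the
+# role:type:width triples form the signature, and "/" directives set attributes such as /branch,
+# /terminal, /effects, and /return.
+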
+class Parser
+    def initialize(data, fileName)
+        @tokens = lex(data, fileName)
+        @idx = 0
+    end
+
+    def token
+        @tokens[@idx]
+    end
+
+    def advance
+        @idx += 1
+    end
+
+    def parseError(*comment)
+        if token
+            token.parseError(*comment)
+        else
+            if comment.empty?
+                raise "Parse error at end of file"
+            else
+                raise "Parse error at end of file: #{comment[0]}"
+            end
+        end
+    end
+
+    def consume(string)
+        parseError("Expected #{string}") unless token == string
+        advance
+    end
+
+    def consumeIdentifier
+        result = token.string
+        parseError("Expected identifier") unless isIdentifier(result)
+        advance
+        result
+    end
+
+    def consumeRole
+        result = token.string
+        parseError("Expected role (U, D, UD, ZD, UZD, UA, or S)") unless isRole(result)
+        advance
+        result
+    end
+
+    def consumeType
+        result = token.string
+        parseError("Expected type (G or F)") unless isGF(result)
+        advance
+        result
+    end
+
+    def consumeKind
+        result = token.string
+        parseError("Expected kind (Imm, BigImm, BitImm, BitImm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
+        advance
+        result
+    end
+
+    def consumeWidth
+        result = token.string
+        parseError("Expected width (8, 16, 32, or 64)") unless isWidth(result)
+        advance
+        result
+    end
+
+    def parseArchs
+        return nil unless isArch(token)
+
+        result = []
+        while isArch(token)
+            case token.string
+            when "x86"
+                result << "X86"
+                result << "X86_64"
+            when "x86_32"
+                result << "X86"
+            when "x86_64"
+                result << "X86_64"
+            when "arm"
+                result << "ARMv7"
+                result << "ARM64"
+            when "armv7"
+                result << "ARMv7"
+            when "arm64"
+                result << "ARM64"
+            when "32"
+                result << "X86"
+                result << "ARMv7"
+            when "64"
+                result << "X86_64"
+                result << "ARM64"
+            else
+                raise token.string
+            end
+            advance
+        end
+
+        consume(":")
+        @lastArchs = result
+    end
+
+    def consumeArchs
+        result = @lastArchs
+        @lastArchs = nil
+        result
+    end
+
+    def parseAndConsumeArchs
+        parseArchs
+        consumeArchs
+    end
+
+    def intersectArchs(left, right)
+        return left unless right
+        return right unless left
+
+        left.select {
+            | value |
+            right.find {
+                | otherValue |
+                value == otherValue
+            }
+        }
+    end
+
+    def parse
+        result = {}
+        
+        loop {
+            break if @idx >= @tokens.length
+
+            if token == "custom"
+                consume("custom")
+                opcodeName = consumeIdentifier
+
+                parseError("Cannot overload a custom opcode") if result[opcodeName]
+
+                result[opcodeName] = Opcode.new(opcodeName, true)
+            else
+                opcodeArchs = parseAndConsumeArchs
+
+                opcodeName = consumeIdentifier
+
+                if result[opcodeName]
+                    opcode = result[opcodeName]
+                    parseError("Cannot overload a custom opcode") if opcode.custom
+                else
+                    opcode = Opcode.new(opcodeName, false)
+                    result[opcodeName] = opcode
+                end
+
+                signature = []
+                forms = []
+                
+                if isRole(token)
+                    loop {
+                        role = consumeRole
+                        consume(":")
+                        type = consumeType
+                        consume(":")
+                        width = consumeWidth
+                        
+                        signature << Arg.new(role, type, width)
+                        
+                        break unless token == ","
+                        consume(",")
+                    }
+                end
+
+                while token == "/"
+                    consume("/")
+                    case token.string
+                    when "branch"
+                        opcode.attributes[:branch] = true
+                        opcode.attributes[:terminal] = true
+                    when "terminal"
+                        opcode.attributes[:terminal] = true
+                    when "effects"
+                        opcode.attributes[:effects] = true
+                    when "return"
+                        opcode.attributes[:return] = true
+                        opcode.attributes[:terminal] = true
+                    else
+                        parseError("Bad / directive")
+                    end
+                    advance
+                end
+
+                parseArchs
+                if isKind(token)
+                    loop {
+                        kinds = []
+                        altName = nil
+                        formArchs = consumeArchs
+                        loop {
+                            kinds << Kind.new(consumeKind)
+
+                            if token == "*"
+                                parseError("Can only apply * to Tmp") unless kinds[-1].name == "Tmp"
+                                kinds[-1].custom = true
+                                consume("*")
+                            end
+
+                            break unless token == ","
+                            consume(",")
+                        }
+
+                        if token == "as"
+                            consume("as")
+                            altName = consumeIdentifier
+                        end
+
+                        parseError("Form has wrong number of arguments for overload") unless kinds.length == signature.length
+                        kinds.each_with_index {
+                            | kind, index |
+                            if kind.name == "Imm" or kind.name == "BigImm" or kind.name == "BitImm" or kind.name == "BitImm64"
+                                if signature[index].role != "U"
+                                    parseError("Form has an immediate for a non-use argument")
+                                end
+                                if signature[index].type != "G"
+                                    parseError("Form has an immediate for a non-general-purpose argument")
+                                end
+                            end
+                        }
+                        forms << Form.new(kinds, altName, intersectArchs(opcodeArchs, formArchs))
+
+                        parseArchs
+                        break unless isKind(token)
+                    }
+                end
+
+                if signature.length == 0
+                    raise unless forms.length == 0
+                    forms << Form.new([], nil, opcodeArchs)
+                end
+
+                opcode.overloads << Overload.new(signature, forms)
+            end
+        }
+
+        result
+    end
+end
+
+$fileName = ARGV[0]
+
+parser = Parser.new(IO::read($fileName), $fileName)
+$opcodes = parser.parse
+
+def writeH(filename)
+    File.open("Air#{filename}.h", "w") {
+        | outp |
+        
+        outp.puts "// Generated by opcode_generator.rb from #{$fileName} -- do not edit!"
+        
+        outp.puts "#ifndef Air#{filename}_h"
+        outp.puts "#define Air#{filename}_h"
+
+        yield outp
+        
+        outp.puts "#endif // Air#{filename}_h"
+    }
+end
+
+writeH("Opcode") {
+    | outp |
+    outp.puts "namespace JSC { namespace B3 { namespace Air {"
+    outp.puts "enum Opcode : int16_t {"
+    $opcodes.keys.sort.each {
+        | opcode |
+        outp.puts "    #{opcode},"
+    }
+    outp.puts "};"
+
+    outp.puts "static const unsigned numOpcodes = #{$opcodes.keys.size};"
+    outp.puts "} } } // namespace JSC::B3::Air"
+    
+    outp.puts "namespace WTF {"
+    outp.puts "class PrintStream;"
+    outp.puts "JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::B3::Air::Opcode);"
+    outp.puts "} // namespace WTF"
+}
+
+# From here on, we don't try to emit properly indented code, since we're using a recursive pattern
+# matcher.
+
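+# Hedged sketch of the shape of the C++ these matchers emit (the opcode and kinds are illustrative):
+# matchInstOverload switches on the opcode and, when an opcode has several overloads, on args.size();
+# matchForms then switches column by column on the argument kinds, e.g.
+#
+#     switch (inst->kind.opcode) {
+#     case Add32:
+#         switch (inst->args.size()) {
+#         case 2:
+#             switch (inst->args[0].kind()) {
+#             case Arg::Tmp:
+#                 switch (inst->args[1].kind()) { ... }
+#                 break;
+#             ...
+#             }
+#             break;
+#         ...
+#         }
+#         break;
+#     ...
+#     }
+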
+def matchForms(outp, speed, forms, columnIndex, columnGetter, filter, callback)
+    return if forms.length == 0
+
+    if filter[forms]
+        return
+    end
+
+    if columnIndex >= forms[0].kinds.length
+        raise "Did not reduce to one form: #{forms.inspect}" unless forms.length == 1
+        callback[forms[0]]
+        outp.puts "break;"
+        return
+    end
+    
+    groups = {}
+    forms.each {
+        | form |
+        kind = form.kinds[columnIndex].name
+        if groups[kind]
+            groups[kind] << form
+        else
+            groups[kind] = [form]
+        end
+    }
+
+    if speed == :fast and groups.length == 1
+        matchForms(outp, speed, forms, columnIndex + 1, columnGetter, filter, callback)
+        return
+    end
+
+    outp.puts "switch (#{columnGetter[columnIndex]}) {"
+    groups.each_pair {
+        | key, value |
+        outp.puts "#if USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
+        Kind.argKinds(key).each {
+            | argKind |
+            outp.puts "case Arg::#{argKind}:"
+        }
+        matchForms(outp, speed, value, columnIndex + 1, columnGetter, filter, callback)
+        outp.puts "break;"
+        outp.puts "#endif // USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
+    }
+    outp.puts "default:"
+    outp.puts "break;"
+    outp.puts "}"
+end
+
+def matchInstOverload(outp, speed, inst)
+    outp.puts "switch (#{inst}->kind.opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+        if opcode.custom
+            yield opcode, nil
+        else
+            needOverloadSwitch = ((opcode.overloads.size != 1) or speed == :safe)
+            outp.puts "switch (#{inst}->args.size()) {" if needOverloadSwitch
+            opcode.overloads.each {
+                | overload |
+                outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+                yield opcode, overload
+                outp.puts "break;" if needOverloadSwitch
+            }
+            if needOverloadSwitch
+                outp.puts "default:"
+                outp.puts "break;"
+                outp.puts "}"
+            end
+        end
+        outp.puts "break;"
+    }
+    outp.puts "default:"
+    outp.puts "break;"
+    outp.puts "}"
+end
+    
+def matchInstOverloadForm(outp, speed, inst)
+    matchInstOverload(outp, speed, inst) {
+        | opcode, overload |
+        if opcode.custom
+            yield opcode, nil, nil
+        else
+            columnGetter = proc {
+                | columnIndex |
+                "#{inst}->args[#{columnIndex}].kind()"
+            }
+            filter = proc { false }
+            callback = proc {
+                | form |
+                yield opcode, overload, form
+            }
+            matchForms(outp, speed, overload.forms, 0, columnGetter, filter, callback)
+        end
+    }
+end
+
+def beginArchs(outp, archs)
+    return unless archs
+    if archs.empty?
+        outp.puts "#if 0"
+        return
+    end
+    outp.puts("#if " + archs.map {
+                  | arch |
+                  "CPU(#{arch})"
+              }.join(" || "))
+end
+
+def endArchs(outp, archs)
+    return unless archs
+    outp.puts "#endif"
+end
+
+writeH("OpcodeUtils") {
+    | outp |
+    outp.puts "#include \"AirCustom.h\""
+    outp.puts "#include \"AirInst.h\""
+    outp.puts "namespace JSC { namespace B3 { namespace Air {"
+    
+    outp.puts "inline bool opgenHiddenTruth() { return true; }"
+    outp.puts "template"
+    outp.puts "inline T* opgenHiddenPtrIdentity(T* pointer) { return pointer; }"
+    outp.puts "#define OPGEN_RETURN(value) do {\\"
+    outp.puts "    if (opgenHiddenTruth())\\"
+    outp.puts "        return value;\\"
+    outp.puts "} while (false)"
+
+    outp.puts "template"
+    outp.puts "void Inst::forEachArg(const Functor& functor)"
+    outp.puts "{"
+    matchInstOverload(outp, :fast, "this") {
+        | opcode, overload |
+        if opcode.custom
+            outp.puts "#{opcode.name}Custom::forEachArg(*this, functor);"
+        else
+            overload.signature.each_with_index {
+                | arg, index |
+                
+                role = nil
+                case arg.role
+                when "U"
+                    role = "Use"
+                when "D"
+                    role = "Def"
+                when "ZD"
+                    role = "ZDef"
+                when "UD"
+                    role = "UseDef"
+                when "UZD"
+                    role = "UseZDef"
+                when "UA"
+                    role = "UseAddr"
+                when "S"
+                    role = "Scratch"
+                else
+                    raise
+                end
+
+                outp.puts "functor(args[#{index}], Arg::#{role}, Arg::#{arg.type}P, #{arg.widthCode});"
+            }
+        end
+    }
+    outp.puts "}"
+
+    outp.puts "template"
+    outp.puts "ALWAYS_INLINE bool isValidForm(Opcode opcode, Arguments... arguments)"
+    outp.puts "{"
+    outp.puts "Arg::Kind kinds[sizeof...(Arguments)] = { arguments... };"
+    outp.puts "switch (opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::isValidFormStatic(arguments...));"
+        else
+            outp.puts "switch (sizeof...(Arguments)) {"
+            opcode.overloads.each {
+                | overload |
+                outp.puts "case #{overload.signature.length}:"
+                columnGetter = proc { | columnIndex | "opgenHiddenPtrIdentity(kinds)[#{columnIndex}]" }
+                filter = proc { false }
+                callback = proc {
+                    | form |
+                    # This conservatively says that Stack is not a valid form for UseAddr,
+                    # because it's only valid if it's not a spill slot. This is consistent with
+                    # isValidForm() being conservative and it also happens to be practical since
+                    # we don't really use isValidForm for deciding when Stack is safe.
+                    overload.signature.length.times {
+                        | index |
+                        if overload.signature[index].role == "UA"
+                            outp.puts "if (opgenHiddenPtrIdentity(kinds)[#{index}] == Arg::Stack)"
+                            outp.puts "    return false;"
+                        end
+                    }
+                    
+                    notCustom = (not form.kinds.detect { | kind | kind.custom })
+                    if notCustom
+                        beginArchs(outp, form.archs)
+                        outp.puts "OPGEN_RETURN(true);"
+                        endArchs(outp, form.archs)
+                    end
+                }
+                matchForms(outp, :safe, overload.forms, 0, columnGetter, filter, callback)
+                outp.puts "break;"
+            }
+            outp.puts "default:"
+            outp.puts "break;"
+            outp.puts "}"
+        end
+        outp.puts "break;"
+    }
+    outp.puts "default:"
+    outp.puts "break;"
+    outp.puts "}"
+    outp.puts "return false; "
+    outp.puts "}"
+
+    outp.puts "inline bool isDefinitelyTerminal(Opcode opcode)"
+    outp.puts "{"
+    outp.puts "switch (opcode) {"
+    didFindTerminals = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal]
+            outp.puts "case #{opcode.name}:"
+            didFindTerminals = true
+        end
+    }
+    if didFindTerminals
+        outp.puts "return true;"
+    end
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+
+    outp.puts "inline bool isReturn(Opcode opcode)"
+    outp.puts "{"
+    outp.puts "switch (opcode) {"
+    didFindReturns = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:return]
+            outp.puts "case #{opcode.name}:"
+            didFindReturns = true
+        end
+    }
+    if didFindReturns
+        outp.puts "return true;"
+    end
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "} } } // namespace JSC::B3::Air"
+}
+
+writeH("OpcodeGenerated") {
+    | outp |
+    outp.puts "#include \"AirInstInlines.h\""
+    outp.puts "#include \"wtf/PrintStream.h\""
+    outp.puts "namespace WTF {"
+    outp.puts "using namespace JSC::B3::Air;"
+    outp.puts "void printInternal(PrintStream& out, Opcode opcode)"
+    outp.puts "{"
+    outp.puts "    switch (opcode) {"
+    $opcodes.keys.each {
+        | opcode |
+        outp.puts "    case #{opcode}:"
+        outp.puts "        out.print(\"#{opcode}\");"
+        outp.puts "        return;"
+    }
+    outp.puts "    }"
+    outp.puts "    RELEASE_ASSERT_NOT_REACHED();"
+    outp.puts "}"
+    outp.puts "} // namespace WTF"
+    outp.puts "namespace JSC { namespace B3 { namespace Air {"
+    outp.puts "bool Inst::isValidForm()"
+    outp.puts "{"
+    matchInstOverloadForm(outp, :safe, "this") {
+        | opcode, overload, form |
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::isValidForm(*this));"
+        else
+            beginArchs(outp, form.archs)
+            needsMoreValidation = false
+            overload.signature.length.times {
+                | index |
+                arg = overload.signature[index]
+                kind = form.kinds[index]
+                needsMoreValidation |= kind.custom
+
+                # Some kinds of Args require additional validation.
+                case kind.name
+                when "Tmp"
+                    outp.puts "if (!args[#{index}].tmp().is#{arg.type}P())"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "Imm"
+                    outp.puts "if (!Arg::isValidImmForm(args[#{index}].value()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "BitImm"
+                    outp.puts "if (!Arg::isValidBitImmForm(args[#{index}].value()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "BitImm64"
+                    outp.puts "if (!Arg::isValidBitImm64Form(args[#{index}].value()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "Addr"
+                    if arg.role == "UA"
+                        outp.puts "if (args[#{index}].isStack() && args[#{index}].stackSlot()->isSpill())"
+                        outp.puts "OPGEN_RETURN(false);"
+                    end
+                    
+                    outp.puts "if (!Arg::isValidAddrForm(args[#{index}].offset()))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "Index"
+                    outp.puts "if (!Arg::isValidIndexForm(args[#{index}].scale(), args[#{index}].offset(), #{arg.widthCode}))"
+                    outp.puts "OPGEN_RETURN(false);"
+                when "BigImm"
+                when "RelCond"
+                when "ResCond"
+                when "DoubleCond"
+                else
+                    raise "Unexpected kind: #{kind.name}"
+                end
+            }
+            if needsMoreValidation
+                outp.puts "if (!is#{opcode.name}Valid(*this))"
+                outp.puts "OPGEN_RETURN(false);"
+            end
+            outp.puts "OPGEN_RETURN(true);"
+            endArchs(outp, form.archs)
+        end
+    }
+    outp.puts "return false;"
+    outp.puts "}"
+
+    outp.puts "bool Inst::admitsStack(unsigned argIndex)"
+    outp.puts "{"
+    outp.puts "switch (kind.opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::admitsStack(*this, argIndex));"
+        else
+            # Switch on the argIndex.
+            outp.puts "switch (argIndex) {"
+
+            numArgs = opcode.overloads.map {
+                | overload |
+                overload.signature.length
+            }.max
+            
+            numArgs.times {
+                | argIndex |
+                outp.puts "case #{argIndex}:"
+
+                # Check if all of the forms of all of the overloads either do, or don't, admit an address
+                # at this index. We expect this to be a very common case.
+                numYes = 0
+                numNo = 0
+                opcode.overloads.each {
+                    | overload |
+                    useAddr = (overload.signature[argIndex] and
+                               overload.signature[argIndex].role == "UA")
+                    overload.forms.each {
+                        | form |
+                        if form.kinds[argIndex] == "Addr" and not useAddr
+                            numYes += 1
+                        else
+                            numNo += 1
+                        end
+                    }
+                }
+
+                # Note that we deliberately test numYes first because if we end up with no forms, we want
+                # to say that Address is inadmissible.
+                if numYes == 0
+                    outp.puts "OPGEN_RETURN(false);"
+                elsif numNo == 0
+                    outp.puts "OPGEN_RETURN(true);"
+                else
+                    # Now do the full test.
+
+                    needOverloadSwitch = (opcode.overloads.size != 1)
+
+                    outp.puts "switch (args.size()) {" if needOverloadSwitch
+                    opcode.overloads.each {
+                        | overload |
+
+                        useAddr = (overload.signature[argIndex] and
+                                   overload.signature[argIndex].role == "UA")
+                        
+                        # Again, check if all of them do what we want.
+                        numYes = 0
+                        numNo = 0
+                        overload.forms.each {
+                            | form |
+                            if form.kinds[argIndex] == "Addr" and not useAddr
+                                numYes += 1
+                            else
+                                numNo += 1
+                            end
+                        }
+
+                        if numYes == 0
+                            # Don't emit anything, just drop to default.
+                        elsif numNo == 0
+                            outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+                            outp.puts "OPGEN_RETURN(true);"
+                            outp.puts "break;" if needOverloadSwitch
+                        else
+                            outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+
+                            # This is how we test the hypothesis that changing this argument to an
+                            # address yields a valid form.
+                            columnGetter = proc {
+                                | columnIndex |
+                                if columnIndex == argIndex
+                                    "Arg::Addr"
+                                else
+                                    "args[#{columnIndex}].kind()"
+                                end
+                            }
+                            filter = proc {
+                                | forms |
+                                numYes = 0
+
+                                forms.each {
+                                    | form |
+                                    if form.kinds[argIndex] == "Addr"
+                                        numYes += 1
+                                    end
+                                }
+
+                                if numYes == 0
+                                    # Drop down, emit no code, since we cannot match.
+                                    true
+                                else
+                                    # Keep going.
+                                    false
+                                end
+                            }
+                            callback = proc {
+                                | form |
+                                beginArchs(outp, form.archs)
+                                outp.puts "OPGEN_RETURN(true);"
+                                endArchs(outp, form.archs)
+                            }
+                            matchForms(outp, :safe, overload.forms, 0, columnGetter, filter, callback)
+
+                            outp.puts "break;" if needOverloadSwitch
+                        end
+                    }
+                    if needOverloadSwitch
+                        outp.puts "default:"
+                        outp.puts "break;"
+                        outp.puts "}"
+                    end
+                end
+                
+                outp.puts "break;"
+            }
+            
+            outp.puts "default:"
+            outp.puts "break;"
+            outp.puts "}"
+        end
+        
+        outp.puts "break;"
+    }
+    outp.puts "default:";
+    outp.puts "break;"
+    outp.puts "}"
+    outp.puts "return false;"
+    outp.puts "}"
+
+    outp.puts "bool Inst::isTerminal()"
+    outp.puts "{"
+    outp.puts "switch (kind.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom::isTerminal(*this);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "bool Inst::hasNonArgNonControlEffects()"
+    outp.puts "{"
+    outp.puts "if (kind.traps)"
+    outp.puts "return true;"
+    outp.puts "switch (kind.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:effects]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom::hasNonArgNonControlEffects(*this);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "bool Inst::hasNonArgEffects()"
+    outp.puts "{"
+    outp.puts "if (kind.traps)"
+    outp.puts "return true;"
+    outp.puts "switch (kind.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal] or opcode.attributes[:effects]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom::hasNonArgEffects(*this);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "CCallHelpers::Jump Inst::generate(CCallHelpers& jit, GenerationContext& context)"
+    outp.puts "{"
+    outp.puts "UNUSED_PARAM(jit);"
+    outp.puts "UNUSED_PARAM(context);"
+    outp.puts "CCallHelpers::Jump result;"
+    matchInstOverloadForm(outp, :fast, "this") {
+        | opcode, overload, form |
+        if opcode.custom
+            outp.puts "OPGEN_RETURN(#{opcode.name}Custom::generate(*this, jit, context));"
+        else
+            beginArchs(outp, form.archs)
+            if form.altName
+                methodName = form.altName
+            else
+                methodName = opcode.masmName
+            end
+            if opcode.attributes[:branch]
+                outp.print "result = "
+            end
+            outp.print "jit.#{methodName}("
+
+            form.kinds.each_with_index {
+                | kind, index |
+                if index != 0
+                    outp.print ", "
+                end
+                case kind.name
+                when "Tmp"
+                    if overload.signature[index].type == "G"
+                        outp.print "args[#{index}].gpr()"
+                    else
+                        outp.print "args[#{index}].fpr()"
+                    end
+                when "Imm", "BitImm"
+                    outp.print "args[#{index}].asTrustedImm32()"
+                when "BigImm", "BitImm64"
+                    outp.print "args[#{index}].asTrustedImm64()"
+                when "Addr"
+                    outp.print "args[#{index}].asAddress()"
+                when "Index"
+                    outp.print "args[#{index}].asBaseIndex()"
+                when "RelCond"
+                    outp.print "args[#{index}].asRelationalCondition()"
+                when "ResCond"
+                    outp.print "args[#{index}].asResultCondition()"
+                when "DoubleCond"
+                    outp.print "args[#{index}].asDoubleCondition()"
+                end
+            }
+
+            outp.puts ");"
+            outp.puts "OPGEN_RETURN(result);"
+            endArchs(outp, form.archs)
+        end
+    }
+    outp.puts "RELEASE_ASSERT_NOT_REACHED();"
+    outp.puts "return result;"
+    outp.puts "}"
+
+    outp.puts "} } } // namespace JSC::B3::Air"
+}
+
+# This is a hack for JSAir. It's a joke.
+File.open("JSAir_opcode.js", "w") {
+    | outp |
+    outp.puts "\"use strict\";"
+    outp.puts "// Generated by opcode_generator.rb from #{$fileName} -- do not edit!"
+    
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "const #{opcode.name} = Symbol(#{opcode.name.inspect});"
+    }
+    
+    outp.puts "function Inst_forEachArg(inst, func)"
+    outp.puts "{"
+    outp.puts "let replacement;"
+    outp.puts "switch (inst.opcode) {"
+    $opcodes.values.each {
+        | opcode |
+        outp.puts "case #{opcode.name}:"
+        if opcode.custom
+            outp.puts "#{opcode.name}Custom.forEachArg(inst, func);"
+        else
+            needOverloadSwitch = opcode.overloads.size != 1
+            outp.puts "switch (inst.args.length) {" if needOverloadSwitch
+            opcode.overloads.each {
+                | overload |
+                outp.puts "case #{overload.signature.length}:" if needOverloadSwitch
+                overload.signature.each_with_index {
+                    | arg, index |
+                    role = nil
+                    case arg.role
+                    when "U"
+                        role = "Use"
+                    when "D"
+                        role = "Def"
+                    when "ZD"
+                        role = "ZDef"
+                    when "UD"
+                        role = "UseDef"
+                    when "UZD"
+                        role = "UseZDef"
+                    when "UA"
+                        role = "UseAddr"
+                    when "S"
+                        role = "Scratch"
+                    else
+                        raise
+                    end
+                    
+                    outp.puts "inst.visitArg(#{index}, func, Arg.#{role}, #{arg.type}P, #{arg.width});"
+                }
+                outp.puts "break;"
+            }
+            if needOverloadSwitch
+                outp.puts "default:"
+                outp.puts "throw new Error(\"Bad overload\");"
+                outp.puts "break;"
+                outp.puts "}"
+            end
+        end
+        outp.puts "break;"
+    }
+    outp.puts "default:"
+    outp.puts "throw \"Bad opcode\";"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "function Inst_hasNonArgEffects(inst)"
+    outp.puts "{"
+    outp.puts "switch (inst.opcode) {"
+    foundTrue = false
+    $opcodes.values.each {
+        | opcode |
+        if opcode.attributes[:terminal] or opcode.attributes[:effects]
+            outp.puts "case #{opcode.name}:"
+            foundTrue = true
+        end
+    }
+    if foundTrue
+        outp.puts "return true;"
+    end
+    $opcodes.values.each {
+        | opcode |
+        if opcode.custom
+            outp.puts "case #{opcode.name}:"
+            outp.puts "return #{opcode.name}Custom.hasNonArgNonControlEffects(inst);"
+        end
+    }
+    outp.puts "default:"
+    outp.puts "return false;"
+    outp.puts "}"
+    outp.puts "}"
+    
+    outp.puts "function opcodeCode(opcode)"
+    outp.puts "{"
+    outp.puts "switch (opcode) {"
+    $opcodes.keys.sort.each_with_index {
+        | opcode, index |
+        outp.puts "case #{opcode}:"
+        outp.puts "return #{index}"
+    }
+    outp.puts "default:"
+    outp.puts "throw new Error(\"bad opcode\");"
+    outp.puts "}"
+    outp.puts "}"
+}
+
diff --git a/b3/air/testair.cpp b/b3/air/testair.cpp
new file mode 100644
index 0000000..9f8a8d8
--- /dev/null
+++ b/b3/air/testair.cpp
@@ -0,0 +1,1964 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#include "AirCode.h"
+#include "AirGenerate.h"
+#include "AirInstInlines.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3Compilation.h"
+#include "B3Procedure.h"
+#include "CCallHelpers.h"
+#include "InitializeThreading.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "PureNaN.h"
+#include "VM.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
+static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }
+
+static void usage()
+{
+    dataLog("Usage: testb3 []\n");
+    if (hiddenTruthBecauseNoReturnIsStupid())
+        exit(1);
+}
+
+#if ENABLE(B3_JIT)
+
+using namespace JSC;
+using namespace JSC::B3::Air;
+
+namespace {
+
+StaticLock crashLock;
+
+// Nothing fancy for now; we just use the existing WTF assertion machinery.
+#define CHECK(x) do {                                                   \
+        if (!!(x))                                                      \
+            break;                                                      \
+        crashLock.lock();                                               \
+        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #x); \
+        CRASH();                                                        \
+    } while (false)
+
+VM* vm;
+
+std::unique_ptr<B3::Compilation> compile(B3::Procedure& proc)
+{
+    prepareForGeneration(proc.code());
+    CCallHelpers jit(vm);
+    generate(proc.code(), jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+
+    return std::make_unique<B3::Compilation>(
+        FINALIZE_CODE(linkBuffer, ("testair compilation")), proc.releaseByproducts());
+}
+
+template<typename T, typename... Arguments>
+T invoke(const B3::Compilation& code, Arguments... arguments)
+{
+    T (*function)(Arguments...) = bitwise_cast<T (*)(Arguments...)>(code.code().executableAddress());
+    return function(arguments...);
+}
+
+template<typename T, typename... Arguments>
+T compileAndRun(B3::Procedure& procedure, Arguments... arguments)
+{
+    return invoke<T>(*compile(procedure), arguments...);
+}
+
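+// Each test below hand-builds an Air procedure, JITs it with compileAndRun<int>() (which goes
+// through prepareForGeneration/generate above and calls the result like a C function), and then
+// CHECKs the values the generated code left behind.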
+void testSimple()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Arg::imm(42), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(compileAndRun<int>(proc) == 42);
+}
+
+// Use this to put a constant into a register without Air being able to see the constant.
+template<typename T>
+void loadConstantImpl(BasicBlock* block, T value, B3::Air::Opcode move, Tmp tmp, Tmp scratch)
+{
+    static StaticLock lock;
+    static std::map<T, T*>* map; // I'm not messing with HashMap's problems with integers.
+
+    LockHolder locker(lock);
+    if (!map)
+        map = new std::map<T, T*>();
+
+    if (!map->count(value))
+        (*map)[value] = new T(value);
+
+    T* ptr = (*map)[value];
+    block->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(ptr)), scratch);
+    block->append(move, nullptr, Arg::addr(scratch), tmp);
+}
+
+void loadConstant(BasicBlock* block, intptr_t value, Tmp tmp)
+{
+    loadConstantImpl(block, value, Move, tmp, tmp);
+}
+
+void loadDoubleConstant(BasicBlock* block, double value, Tmp tmp, Tmp scratch)
+{
+    loadConstantImpl(block, value, MoveDouble, tmp, scratch);
+}
+
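+// All of the Shuffle tests share one shape: put known constants into registers via loadConstant()
+// so Air never sees them as immediates, perform a single Shuffle, spill every interesting register
+// into the |things| array, and CHECK that each slot ended up with the expected source value.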
+void testShuffleSimpleSwap()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32));
+
+    int32_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 4);
+    CHECK(things[3] == 3);
+}
+
+void testShuffleSimpleShift()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32));
+
+    int32_t things[5];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 3);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 4);
+}
+
+void testShuffleLongShift()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+    CHECK(things[6] == 6);
+    CHECK(things[7] == 7);
+}
+
+void testShuffleLongShiftBackwards()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+    CHECK(things[6] == 6);
+    CHECK(things[7] == 7);
+}
+
+void testShuffleSimpleRotate()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32));
+
+    int32_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 4);
+}
+
+void testShuffleSimpleBroadcast()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32));
+
+    int32_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 1);
+    CHECK(things[3] == 1);
+}
+
+void testShuffleBroadcastAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Arg::imm(35), Tmp(GPRInfo::regT0));
+    unsigned count = 1;
+    for (Reg reg : regs) {
+        if (reg != Reg(GPRInfo::regT0))
+            loadConstant(root, count++, Tmp(reg));
+    }
+    Inst& shuffle = root->append(Shuffle, nullptr);
+    for (Reg reg : regs) {
+        if (reg != Reg(GPRInfo::regT0))
+            shuffle.append(Tmp(GPRInfo::regT0), Tmp(reg), Arg::widthArg(Arg::Width32));
+    }
+
+    StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+    for (unsigned i = 0; i < regs.size(); ++i)
+        root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+    Vector<int32_t> things(regs.size(), 666);
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+        root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+    }
+    
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    for (int32_t thing : things)
+        CHECK(thing == 35);
+}
+
+void testShuffleTreeShift()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 1);
+    CHECK(things[3] == 2);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+    CHECK(things[6] == 3);
+    CHECK(things[7] == 4);
+}
+
+void testShuffleTreeShiftBackward()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 1);
+    CHECK(things[3] == 2);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+    CHECK(things[6] == 3);
+    CHECK(things[7] == 4);
+}
+
+void testShuffleTreeShiftOtherBackward()
+{
+    // NOTE: This test was my original attempt at TreeShiftBackward but mistakes were made. So, this
+    // ends up being just a weird test. But weird tests are useful, so I kept it.
+    
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    loadConstant(root, 7, Tmp(GPRInfo::regT6));
+    loadConstant(root, 8, Tmp(GPRInfo::regT7));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT7), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT6), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT6), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT7), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT7), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32));
+
+    int32_t things[8];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT6), Arg::addr(base, 6 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT7), Arg::addr(base, 7 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 8);
+    CHECK(things[2] == 8);
+    CHECK(things[3] == 7);
+    CHECK(things[4] == 7);
+    CHECK(things[5] == 6);
+    CHECK(things[6] == 6);
+    CHECK(things[7] == 5);
+}
+
+void testShuffleMultipleShifts()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 3);
+    CHECK(things[3] == 3);
+    CHECK(things[4] == 3);
+    CHECK(things[5] == 1);
+}
+
+void testShuffleRotateWithFringe()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 1);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+}
+
+void testShuffleRotateWithFringeInWeirdOrder()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 1);
+    CHECK(things[4] == 2);
+    CHECK(things[5] == 3);
+}
+
+void testShuffleRotateWithLongFringe()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 1);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+}
+
+void testShuffleMultipleRotates()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT5), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 6);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+}
+
+void testShuffleShiftAndRotate()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    loadConstant(root, 4, Tmp(GPRInfo::regT3));
+    loadConstant(root, 5, Tmp(GPRInfo::regT4));
+    loadConstant(root, 6, Tmp(GPRInfo::regT5));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT1), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT0), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT4), Tmp(GPRInfo::regT5), Arg::widthArg(Arg::Width32));
+
+    int32_t things[6];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT5), Arg::addr(base, 5 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 3);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 2);
+    CHECK(things[3] == 4);
+    CHECK(things[4] == 4);
+    CHECK(things[5] == 5);
+}
+
+void testShuffleShiftAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, 35 + i, Tmp(regs[i]));
+    Inst& shuffle = root->append(Shuffle, nullptr);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+
+    StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+    for (unsigned i = 0; i < regs.size(); ++i)
+        root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+    Vector<int32_t> things(regs.size(), 666);
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+        root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+    }
+    
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 35);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        CHECK(things[i] == 35 + static_cast<int32_t>(i) - 1);
+}
+
+void testShuffleRotateAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    const Vector<Reg>& regs = code.regsInPriorityOrder(Arg::GP);
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, 35 + i, Tmp(regs[i]));
+    Inst& shuffle = root->append(Shuffle, nullptr);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+    shuffle.append(Tmp(regs.last()), Tmp(regs[0]), Arg::widthArg(Arg::Width32));
+
+    StackSlot* slot = code.addStackSlot(sizeof(int32_t) * regs.size(), StackSlotKind::Locked);
+    for (unsigned i = 0; i < regs.size(); ++i)
+        root->append(Move32, nullptr, Tmp(regs[i]), Arg::stack(slot, i * sizeof(int32_t)));
+
+    Vector<int32_t> things(regs.size(), 666);
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
+        root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
+    }
+    
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 35 + static_cast<int32_t>(regs.size()) - 1);
+    for (unsigned i = 1; i < regs.size(); ++i)
+        CHECK(things[i] == 35 + static_cast<int32_t>(i) - 1);
+}
+
+void testShuffleSimpleSwap64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width64));
+
+    int64_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 40000000000000000ll);
+    CHECK(things[3] == 30000000000000000ll);
+}
+
+void testShuffleSimpleShift64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    loadConstant(root, 50000000000000000ll, Tmp(GPRInfo::regT4));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width64));
+
+    int64_t things[5];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 30000000000000000ll);
+    CHECK(things[3] == 30000000000000000ll);
+    CHECK(things[4] == 40000000000000000ll);
+}
+
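+// Mixed-width note: a Width32 shuffle move transfers only the low 32 bits and is expected to
+// zero-extend its destination register on the 64-bit targets these tests assume, which is why
+// the checks below compare against uint32_t-truncated constants.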
+void testShuffleSwapMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width32),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT2), Arg::widthArg(Arg::Width64));
+
+    int64_t things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 40000000000000000ll);
+    CHECK(things[3] == static_cast<uint32_t>(30000000000000000ll));
+}
+
+void testShuffleShiftMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 10000000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 20000000000000000ll, Tmp(GPRInfo::regT1));
+    loadConstant(root, 30000000000000000ll, Tmp(GPRInfo::regT2));
+    loadConstant(root, 40000000000000000ll, Tmp(GPRInfo::regT3));
+    loadConstant(root, 50000000000000000ll, Tmp(GPRInfo::regT4));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT2), Tmp(GPRInfo::regT3), Arg::widthArg(Arg::Width64),
+        Tmp(GPRInfo::regT3), Tmp(GPRInfo::regT4), Arg::widthArg(Arg::Width32));
+
+    int64_t things[5];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT3), Arg::addr(base, 3 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT4), Arg::addr(base, 4 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 10000000000000000ll);
+    CHECK(things[1] == 20000000000000000ll);
+    CHECK(things[2] == 30000000000000000ll);
+    CHECK(things[3] == 30000000000000000ll);
+    CHECK(things[4] == static_cast<uint32_t>(40000000000000000ll));
+}
+
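+// The *Memory tests let Shuffle use Arg::addr operands directly, so a single move within the
+// shuffle can have memory as its source, its destination, or both.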
+void testShuffleShiftMemory()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32));
+
+    int32_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(memory[0] == 35);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryLong()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    loadConstant(root, 3, Tmp(GPRInfo::regT2));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT3));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+        
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT3), 0 * sizeof(int32_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT3), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT3), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT3), 1 * sizeof(int32_t)), Tmp(GPRInfo::regT2),
+        Arg::widthArg(Arg::Width32));
+
+    int32_t things[3];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 1);
+    CHECK(things[2] == 36);
+    CHECK(memory[0] == 2);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryAllRegs()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, i + 1, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int32_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int32_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width32));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
+
+    Vector<int32_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move32, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int32_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 36);
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int32_t>(i));
+    CHECK(memory[0] == 1);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleShiftMemoryAllRegs64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width64));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1000000000000ll);
+    CHECK(things[1] == 36000000000000ll);
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+    CHECK(memory[0] == 1000000000000ll);
+    CHECK(memory[1] == 35000000000000ll);
+}
+
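+// combineHiLo() models what a 32-bit store into a 64-bit slot leaves behind: |high| with its low
+// 32 bits replaced by the low 32 bits of |low| (assuming the little-endian layout these tests run on).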
+int64_t combineHiLo(int64_t high, int64_t low)
+{
+    union {
+        int64_t value;
+        int32_t halves[2];
+    } u;
+    u.value = high;
+    u.halves[0] = static_cast<int32_t>(low);
+    return u.value;
+}
+
+void testShuffleShiftMemoryAllRegsMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width32));
+
+    for (unsigned i = 2; i < regs.size(); ++i) {
+        shuffle.append(
+            Tmp(regs[i - 1]), Tmp(regs[i]),
+            (i & 1) ? Arg::widthArg(Arg::Width32) : Arg::widthArg(Arg::Width64));
+    }
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 1000000000000ll);
+    CHECK(things[1] == static_cast<uint32_t>(36000000000000ll));
+    for (unsigned i = 2; i < regs.size(); ++i) {
+        int64_t value = static_cast<int64_t>(i) * 1000000000000ll;
+        CHECK(things[i] == ((i & 1) ? static_cast<uint32_t>(value) : value));
+    }
+    CHECK(memory[0] == combineHiLo(35000000000000ll, 1000000000000ll));
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemory()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int32_t memory[2];
+    memory[0] = 35;
+    memory[1] = 36;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int32_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int32_t)), Tmp(GPRInfo::regT0),
+        Arg::widthArg(Arg::Width32));
+
+    int32_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
+    root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 36);
+    CHECK(things[1] == 1);
+    CHECK(memory[0] == 2);
+    CHECK(memory[1] == 35);
+}
+
+void testShuffleRotateMemory64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2000000000000ll, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width64),
+
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Tmp(GPRInfo::regT0),
+        Arg::widthArg(Arg::Width64));
+
+    int64_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun<int>(proc));
+
+    CHECK(things[0] == 36000000000000ll);
+    CHECK(things[1] == 1000000000000ll);
+    CHECK(memory[0] == 2000000000000ll);
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemoryMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    BasicBlock* root = code.addBlock();
+    loadConstant(root, 1000000000000ll, Tmp(GPRInfo::regT0));
+    loadConstant(root, 2000000000000ll, Tmp(GPRInfo::regT1));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT2));
+    root->append(
+        Shuffle, nullptr,
+        
+        Tmp(GPRInfo::regT0), Tmp(GPRInfo::regT1), Arg::widthArg(Arg::Width32),
+
+        Tmp(GPRInfo::regT1), Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT2), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width32),
+
+        Arg::addr(Tmp(GPRInfo::regT2), 1 * sizeof(int64_t)), Tmp(GPRInfo::regT0),
+        Arg::widthArg(Arg::Width64));
+
+    int64_t things[2];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
+    root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun(proc));
+
+    CHECK(things[0] == 36000000000000ll);
+    CHECK(things[1] == static_cast(1000000000000ll));
+    CHECK(memory[0] == 2000000000000ll);
+    CHECK(memory[1] == combineHiLo(36000000000000ll, 35000000000000ll));
+}
+
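+// Same rotate-through-memory idea, but the cycle now includes every GP register except
+// regT0 (which holds the memory base): regs[0] -> memory[0] -> memory[1] -> regs[1] ->
+// regs[2] -> ... -> regs.last() -> regs[0].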
+void testShuffleRotateMemoryAllRegs64()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width64),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width64),
+
+        regs.last(), regs[0], Arg::widthArg(Arg::Width64));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun(proc));
+
+    CHECK(things[0] == static_cast<int64_t>(regs.size()) * 1000000000000ll);
+    CHECK(things[1] == 36000000000000ll);
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+    CHECK(memory[0] == 1000000000000ll);
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleRotateMemoryAllRegsMixedWidth()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    int64_t memory[2];
+    memory[0] = 35000000000000ll;
+    memory[1] = 36000000000000ll;
+
+    Vector<Reg> regs = code.regsInPriorityOrder(Arg::GP);
+    regs.removeFirst(Reg(GPRInfo::regT0));
+
+    BasicBlock* root = code.addBlock();
+    for (unsigned i = 0; i < regs.size(); ++i)
+        loadConstant(root, (i + 1) * 1000000000000ll, Tmp(regs[i]));
+    root->append(Move, nullptr, Arg::immPtr(&memory), Tmp(GPRInfo::regT0));
+    Inst& shuffle = root->append(
+        Shuffle, nullptr,
+        
+        Tmp(regs[0]), Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::widthArg(Arg::Width32),
+        
+        Arg::addr(Tmp(GPRInfo::regT0), 0 * sizeof(int64_t)),
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Arg::widthArg(Arg::Width64),
+
+        Arg::addr(Tmp(GPRInfo::regT0), 1 * sizeof(int64_t)), Tmp(regs[1]),
+        Arg::widthArg(Arg::Width32),
+
+        regs.last(), regs[0], Arg::widthArg(Arg::Width32));
+
+    for (unsigned i = 2; i < regs.size(); ++i)
+        shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
+
+    Vector<int64_t> things(regs.size(), 666);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+    for (unsigned i = 0; i < regs.size(); ++i) {
+        root->append(
+            Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
+    }
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    CHECK(!compileAndRun(proc));
+
+    CHECK(things[0] == static_cast<uint32_t>(static_cast<int64_t>(regs.size()) * 1000000000000ll));
+    CHECK(things[1] == static_cast<uint32_t>(36000000000000ll));
+    for (unsigned i = 2; i < regs.size(); ++i)
+        CHECK(things[i] == static_cast<int64_t>(i) * 1000000000000ll);
+    CHECK(memory[0] == combineHiLo(35000000000000ll, 1000000000000ll));
+    CHECK(memory[1] == 35000000000000ll);
+}
+
+void testShuffleSwapDouble()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadDoubleConstant(root, 1, Tmp(FPRInfo::fpRegT0), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 2, Tmp(FPRInfo::fpRegT1), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 3, Tmp(FPRInfo::fpRegT2), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 4, Tmp(FPRInfo::fpRegT3), Tmp(GPRInfo::regT0));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(FPRInfo::fpRegT2), Tmp(FPRInfo::fpRegT3), Arg::widthArg(Arg::Width64),
+        Tmp(FPRInfo::fpRegT3), Tmp(FPRInfo::fpRegT2), Arg::widthArg(Arg::Width64));
+
+    double things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT3), Arg::addr(base, 3 * sizeof(double)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 4);
+    CHECK(things[3] == 3);
+}
+
+void testShuffleShiftDouble()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    loadDoubleConstant(root, 1, Tmp(FPRInfo::fpRegT0), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 2, Tmp(FPRInfo::fpRegT1), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 3, Tmp(FPRInfo::fpRegT2), Tmp(GPRInfo::regT0));
+    loadDoubleConstant(root, 4, Tmp(FPRInfo::fpRegT3), Tmp(GPRInfo::regT0));
+    root->append(
+        Shuffle, nullptr,
+        Tmp(FPRInfo::fpRegT2), Tmp(FPRInfo::fpRegT3), Arg::widthArg(Arg::Width64));
+
+    double things[4];
+    Tmp base = code.newTmp(Arg::GP);
+    root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT3), Arg::addr(base, 3 * sizeof(double)));
+    root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
+    root->append(Ret32, nullptr, Tmp(GPRInfo::returnValueGPR));
+
+    memset(things, 0, sizeof(things));
+    
+    CHECK(!compileAndRun(proc));
+
+    CHECK(things[0] == 1);
+    CHECK(things[1] == 2);
+    CHECK(things[2] == 3);
+    CHECK(things[3] == 3);
+}
+
+#if CPU(X86) || CPU(X86_64)
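+// These tests cover how MulDouble is lowered to x86's (V)MULSD. The *Rex* variants
+// deliberately use operands such as xmm13-xmm15 and r12/r13, whose encodings need the
+// register-extension bits that a REX (or VEX) prefix supplies, so they stress the
+// prefix-emission paths of the assembler.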
+void testX86VMULSD()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(FPRInfo::argumentFPR1), Tmp(FPRInfo::argumentFPR2));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDDestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOp1DestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+    root->append(MulDouble, nullptr, Tmp(X86Registers::xmm14), Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOp2DestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm14));
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDOpsDestRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR1), Tmp(X86Registers::xmm13));
+    root->append(MulDouble, nullptr, Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm13), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    CHECK(compileAndRun(proc, 2.4, 4.2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(GPRInfo::argumentGPR0), - 16), Tmp(FPRInfo::argumentFPR2));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun(proc, 2.4, &secondArg + 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddrOpRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(X86Registers::r13), - 16), Tmp(FPRInfo::argumentFPR2));
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR2), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun(proc, 2.4, &secondArg + 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDDestRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(GPRInfo::argumentGPR0), 16), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun(proc, 2.4, &secondArg - 2, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDRegOpDestRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(MoveDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm14));
+    root->append(MulDouble, nullptr, Arg::addr(Tmp(GPRInfo::argumentGPR0)), Tmp(X86Registers::xmm14), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun(proc, 2.4, &secondArg, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDAddrOpDestRexAddr()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Tmp(FPRInfo::argumentFPR0), Arg::addr(Tmp(X86Registers::r13), 8), Tmp(X86Registers::xmm15));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm15), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    CHECK(compileAndRun(proc, 2.4, &secondArg - 1, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDBaseNeedsRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Arg::index(Tmp(X86Registers::r13), Tmp(GPRInfo::argumentGPR1)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    uint64_t index = 8;
+    CHECK(compileAndRun(proc, 2.4, &secondArg - 1, index, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDIndexNeedsRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR1), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Arg::index(Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r13)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    uint64_t index = - 8;
+    CHECK(compileAndRun(proc, 2.4, &secondArg + 1, index, pureNaN()) == 2.4 * 4.2);
+}
+
+void testX86VMULSDBaseIndexNeedRex()
+{
+    B3::Procedure proc;
+    Code& code = proc.code();
+
+    BasicBlock* root = code.addBlock();
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR0), Tmp(X86Registers::r12));
+    root->append(Move, nullptr, Tmp(GPRInfo::argumentGPR1), Tmp(X86Registers::r13));
+    root->append(MulDouble, nullptr, Arg::index(Tmp(X86Registers::r12), Tmp(X86Registers::r13)), Tmp(FPRInfo::argumentFPR0), Tmp(X86Registers::xmm0));
+    root->append(MoveDouble, nullptr, Tmp(X86Registers::xmm0), Tmp(FPRInfo::returnValueFPR));
+    root->append(RetDouble, nullptr, Tmp(FPRInfo::returnValueFPR));
+
+    double secondArg = 4.2;
+    uint64_t index = 16;
+    CHECK(compileAndRun(proc, 2.4, &secondArg - 2, index, pureNaN()) == 2.4 * 4.2);
+}
+
+#endif
+
+#define RUN(test) do {                          \
+        if (!shouldRun(#test))                  \
+            break;                              \
+        tasks.append(                           \
+            createSharedTask<void()>(           \
+                [&] () {                        \
+                    dataLog(#test "...\n");     \
+                    test;                       \
+                    dataLog(#test ": OK!\n");   \
+                }));                            \
+    } while (false);
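+// Typical use, as in run() below: RUN(testSomething(42)) enqueues the test as a shared
+// task if its name matches the optional filter; the worker threads created at the end of
+// run() then drain the queue and execute the tests in parallel.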
+
+void run(const char* filter)
+{
+    JSC::initializeThreading();
+    vm = &VM::create(LargeHeap).leakRef();
+
+    Deque<RefPtr<SharedTask<void()>>> tasks;
+
+    auto shouldRun = [&] (const char* testName) -> bool {
+        return !filter || !!strcasestr(testName, filter);
+    };
+
+    RUN(testSimple());
+    
+    RUN(testShuffleSimpleSwap());
+    RUN(testShuffleSimpleShift());
+    RUN(testShuffleLongShift());
+    RUN(testShuffleLongShiftBackwards());
+    RUN(testShuffleSimpleRotate());
+    RUN(testShuffleSimpleBroadcast());
+    RUN(testShuffleBroadcastAllRegs());
+    RUN(testShuffleTreeShift());
+    RUN(testShuffleTreeShiftBackward());
+    RUN(testShuffleTreeShiftOtherBackward());
+    RUN(testShuffleMultipleShifts());
+    RUN(testShuffleRotateWithFringe());
+    RUN(testShuffleRotateWithFringeInWeirdOrder());
+    RUN(testShuffleRotateWithLongFringe());
+    RUN(testShuffleMultipleRotates());
+    RUN(testShuffleShiftAndRotate());
+    RUN(testShuffleShiftAllRegs());
+    RUN(testShuffleRotateAllRegs());
+    RUN(testShuffleSimpleSwap64());
+    RUN(testShuffleSimpleShift64());
+    RUN(testShuffleSwapMixedWidth());
+    RUN(testShuffleShiftMixedWidth());
+    RUN(testShuffleShiftMemory());
+    RUN(testShuffleShiftMemoryLong());
+    RUN(testShuffleShiftMemoryAllRegs());
+    RUN(testShuffleShiftMemoryAllRegs64());
+    RUN(testShuffleShiftMemoryAllRegsMixedWidth());
+    RUN(testShuffleRotateMemory());
+    RUN(testShuffleRotateMemory64());
+    RUN(testShuffleRotateMemoryMixedWidth());
+    RUN(testShuffleRotateMemoryAllRegs64());
+    RUN(testShuffleRotateMemoryAllRegsMixedWidth());
+    RUN(testShuffleSwapDouble());
+    RUN(testShuffleShiftDouble());
+
+#if CPU(X86) || CPU(X86_64)
+    RUN(testX86VMULSD());
+    RUN(testX86VMULSDDestRex());
+    RUN(testX86VMULSDOp1DestRex());
+    RUN(testX86VMULSDOp2DestRex());
+    RUN(testX86VMULSDOpsDestRex());
+
+    RUN(testX86VMULSDAddr());
+    RUN(testX86VMULSDAddrOpRexAddr());
+    RUN(testX86VMULSDDestRexAddr());
+    RUN(testX86VMULSDRegOpDestRexAddr());
+    RUN(testX86VMULSDAddrOpDestRexAddr());
+
+    RUN(testX86VMULSDBaseNeedsRex());
+    RUN(testX86VMULSDIndexNeedsRex());
+    RUN(testX86VMULSDBaseIndexNeedRex());
+#endif
+
+    if (tasks.isEmpty())
+        usage();
+
+    Lock lock;
+
+    Vector<ThreadIdentifier> threads;
+    for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
+        threads.append(
+            createThread(
+                "testb3 thread",
+                [&] () {
+                    for (;;) {
+                        RefPtr<SharedTask<void()>> task;
+                        {
+                            LockHolder locker(lock);
+                            if (tasks.isEmpty())
+                                return;
+                            task = tasks.takeFirst();
+                        }
+
+                        task->run();
+                    }
+                }));
+    }
+
+    for (ThreadIdentifier thread : threads)
+        waitForThreadCompletion(thread);
+    crashLock.lock();
+}
+
+} // anonymous namespace
+
+#else // ENABLE(B3_JIT)
+
+static void run(const char*)
+{
+    dataLog("B3 JIT is not enabled.\n");
+}
+
+#endif // ENABLE(B3_JIT)
+
+int main(int argc, char** argv)
+{
+    const char* filter = nullptr;
+    switch (argc) {
+    case 1:
+        break;
+    case 2:
+        filter = argv[1];
+        break;
+    default:
+        usage();
+        break;
+    }
+    
+    run(filter);
+    return 0;
+}
diff --git a/b3/testb3.cpp b/b3/testb3.cpp
new file mode 100644
index 0000000..2cec4b5
--- /dev/null
+++ b/b3/testb3.cpp
@@ -0,0 +1,15735 @@
+/*
+ * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+
+#include "AirCode.h"
+#include "AirInstInlines.h"
+#include "AirValidate.h"
+#include "AllowMacroScratchRegisterUsage.h"
+#include "B3ArgumentRegValue.h"
+#include "B3BasicBlockInlines.h"
+#include "B3CCallValue.h"
+#include "B3Compilation.h"
+#include "B3Compile.h"
+#include "B3ComputeDivisionMagic.h"
+#include "B3Const32Value.h"
+#include "B3ConstPtrValue.h"
+#include "B3Effects.h"
+#include "B3FenceValue.h"
+#include "B3Generate.h"
+#include "B3LowerToAir.h"
+#include "B3MathExtras.h"
+#include "B3MemoryValue.h"
+#include "B3MoveConstants.h"
+#include "B3Procedure.h"
+#include "B3ReduceStrength.h"
+#include "B3SlotBaseValue.h"
+#include "B3StackSlot.h"
+#include "B3StackmapGenerationParams.h"
+#include "B3SwitchValue.h"
+#include "B3UpsilonValue.h"
+#include "B3UseCounts.h"
+#include "B3Validate.h"
+#include "B3ValueInlines.h"
+#include "B3VariableValue.h"
+#include "B3WasmAddressValue.h"
+#include "B3WasmBoundsCheckValue.h"
+#include "CCallHelpers.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "InitializeThreading.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "PureNaN.h"
+#include "VM.h"
+#include <cmath>
+#include <string>
+#include <wtf/ListDump.h>
+#include <wtf/Lock.h>
+#include <wtf/NumberOfCores.h>
+#include <wtf/Threading.h>
+
+// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
+static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }
+
+static void usage()
+{
+    dataLog("Usage: testb3 []\n");
+    if (hiddenTruthBecauseNoReturnIsStupid())
+        exit(1);
+}
+
+#if ENABLE(B3_JIT)
+
+using namespace JSC;
+using namespace JSC::B3;
+
+namespace {
+
+bool shouldBeVerbose()
+{
+    return shouldDumpIR(B3Mode);
+}
+
+StaticLock crashLock;
+
+// Nothing fancy for now; we just use the existing WTF assertion machinery.
+#define CHECK(x) do {                                                   \
+        if (!!(x))                                                      \
+            break;                                                      \
+        crashLock.lock();                                               \
+        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #x); \
+        CRASH();                                                        \
+    } while (false)
+
+#define CHECK_EQ(x, y) do { \
+        auto __x = (x); \
+        auto __y = (y); \
+        if (__x == __y) \
+            break; \
+        crashLock.lock(); \
+        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, toCString(#x " == " #y, " (" #x " == ", __x, ", " #y " == ", __y, ")").data()); \
+        CRASH(); \
+    } while (false)
+
+VM* vm;
+
+std::unique_ptr compile(Procedure& procedure, unsigned optLevel = 1)
+{
+    return std::make_unique(B3::compile(*vm, procedure, optLevel));
+}
+
+template<typename T, typename... Arguments>
+T invoke(MacroAssemblerCodePtr ptr, Arguments... arguments)
+{
+    T (*function)(Arguments...) = bitwise_cast<T (*)(Arguments...)>(ptr.executableAddress());
+    return function(arguments...);
+}
+
+template<typename T, typename... Arguments>
+T invoke(const Compilation& code, Arguments... arguments)
+{
+    return invoke<T>(code.code(), arguments...);
+}
+
+template<typename T, typename... Arguments>
+T compileAndRun(Procedure& procedure, Arguments... arguments)
+{
+    return invoke<T>(*compile(procedure), arguments...);
+}
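+// A typical test builds a Procedure, fills in its root block, and round-trips it through
+// compileAndRun(). Sketch (test42() below is the real thing):
+//
+//     Procedure proc;
+//     BasicBlock* root = proc.addBlock();
+//     root->appendNewControlValue(
+//         proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 42));
+//     CHECK(compileAndRun<int>(proc) == 42);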
+
+void lowerToAirForTesting(Procedure& proc)
+{
+    proc.resetReachability();
+    
+    if (shouldBeVerbose())
+        dataLog("B3 before lowering:\n", proc);
+    
+    validate(proc);
+    lowerToAir(proc);
+    
+    if (shouldBeVerbose())
+        dataLog("Air after lowering:\n", proc.code());
+    
+    Air::validate(proc.code());
+}
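+// Tests that only want to look at the Air produced for a Procedure, rather than execute
+// it, call lowerToAirForTesting(proc) and then inspect proc.code().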
+
+template<typename Func>
+void checkDisassembly(Compilation& compilation, const Func& func, CString failText)
+{
+    CString disassembly = compilation.disassembly();
+    if (func(disassembly.data()))
+        return;
+    
+    crashLock.lock();
+    dataLog("Bad lowering!  ", failText, "\n");
+    dataLog(disassembly);
+    CRASH();
+}
+
+void checkUsesInstruction(Compilation& compilation, const char* text)
+{
+    checkDisassembly(
+        compilation,
+        [&] (const char* disassembly) -> bool {
+            return strstr(disassembly, text);
+        },
+        toCString("Expected to find ", text, " but didnt!"));
+}
+
+void checkDoesNotUseInstruction(Compilation& compilation, const char* text)
+{
+    checkDisassembly(
+        compilation,
+        [&] (const char* disassembly) -> bool {
+            return !strstr(disassembly, text);
+        },
+        toCString("Did not expected to find ", text, " but it's there!"));
+}
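+// Both helpers grep the disassembly of a Compilation. For example (the mnemonic here is
+// just illustrative):
+//
+//     auto code = compile(proc);
+//     checkUsesInstruction(*code, "vmulsd");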
+
+template<typename Type>
+struct Operand {
+    const char* name;
+    Type value;
+};
+
+typedef Operand<int64_t> Int64Operand;
+typedef Operand<int32_t> Int32Operand;
+
+template<typename FloatType>
+void populateWithInterestingValues(Vector<Operand<FloatType>>& operands)
+{
+    operands.append({ "0.", static_cast(0.) });
+    operands.append({ "-0.", static_cast(-0.) });
+    operands.append({ "0.4", static_cast(0.5) });
+    operands.append({ "-0.4", static_cast(-0.5) });
+    operands.append({ "0.5", static_cast(0.5) });
+    operands.append({ "-0.5", static_cast(-0.5) });
+    operands.append({ "0.6", static_cast(0.5) });
+    operands.append({ "-0.6", static_cast(-0.5) });
+    operands.append({ "1.", static_cast(1.) });
+    operands.append({ "-1.", static_cast(-1.) });
+    operands.append({ "2.", static_cast(2.) });
+    operands.append({ "-2.", static_cast(-2.) });
+    operands.append({ "M_PI", static_cast(M_PI) });
+    operands.append({ "-M_PI", static_cast(-M_PI) });
+    operands.append({ "min", std::numeric_limits::min() });
+    operands.append({ "max", std::numeric_limits::max() });
+    operands.append({ "lowest", std::numeric_limits::lowest() });
+    operands.append({ "epsilon", std::numeric_limits::epsilon() });
+    operands.append({ "infiniti", std::numeric_limits::infinity() });
+    operands.append({ "-infiniti", - std::numeric_limits::infinity() });
+    operands.append({ "PNaN", static_cast(PNaN) });
+}
+
+template<typename FloatType>
+Vector<Operand<FloatType>> floatingPointOperands()
+{
+    Vector<Operand<FloatType>> operands;
+    populateWithInterestingValues(operands);
+    return operands;
+}
+
+static Vector<Int64Operand> int64Operands()
+{
+    Vector<Int64Operand> operands;
+    operands.append({ "0", 0 });
+    operands.append({ "1", 1 });
+    operands.append({ "-1", -1 });
+    operands.append({ "42", 42 });
+    operands.append({ "-42", -42 });
+    operands.append({ "int64-max", std::numeric_limits::max() });
+    operands.append({ "int64-min", std::numeric_limits::min() });
+    operands.append({ "int32-max", std::numeric_limits::max() });
+    operands.append({ "int32-min", std::numeric_limits::min() });
+
+    return operands;
+}
+
+static Vector<Int32Operand> int32Operands()
+{
+    Vector<Int32Operand> operands({
+        { "0", 0 },
+        { "1", 1 },
+        { "-1", -1 },
+        { "42", 42 },
+        { "-42", -42 },
+        { "int32-max", std::numeric_limits::max() },
+        { "int32-min", std::numeric_limits::min() }
+    });
+    return operands;
+}
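+// Tests generally cross-product these operand tables instead of hard-coding inputs, as
+// the testMulAdd*32 tests below do:
+//
+//     for (auto a : int32Operands()) {
+//         for (auto b : int32Operands())
+//             CHECK(invoke<int32_t>(*code, a.value, b.value) == a.value + b.value);
+//     }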
+
+void add32(CCallHelpers& jit, GPRReg src1, GPRReg src2, GPRReg dest)
+{
+    if (src2 == dest)
+        jit.add32(src1, dest);
+    else {
+        jit.move(src1, dest);
+        jit.add32(src2, dest);
+    }
+}
+
+void test42()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* const42 = root->appendNew<Const32Value>(proc, Origin(), 42);
+    root->appendNewControlValue(proc, Return, Origin(), const42);
+
+    CHECK(compileAndRun<int>(proc) == 42);
+}
+
+void testLoad42()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), &x)));
+
+    CHECK(compileAndRun<int>(proc) == 42);
+}
+
+void testLoadWithOffsetImpl(int32_t offset64, int32_t offset32)
+{
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int64_t x = -42;
+        Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int64, Origin(),
+                base,
+                offset64));
+
+        char* address = reinterpret_cast<char*>(&x) - offset64;
+        CHECK(compileAndRun<int64_t>(proc, address) == -42);
+    }
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int32_t x = -42;
+        Value* base = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<MemoryValue>(
+                proc, Load, Int32, Origin(),
+                base,
+                offset32));
+
+        char* address = reinterpret_cast<char*>(&x) - offset32;
+        CHECK(compileAndRun<int32_t>(proc, address) == -42);
+    }
+}
+
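+// The offsets below track the boundaries of ARM64-style load/store addressing: the
+// unscaled signed 9-bit immediate covers [-256, 255], and the scaled unsigned 12-bit
+// immediate covers [0, 4095] times the access size (32760 for 8-byte loads, 16380 for
+// 4-byte loads). Offsets beyond those ranges force the lowering to compute the address
+// some other way.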
+void testLoadOffsetImm9Max()
+{
+    testLoadWithOffsetImpl(255, 255);
+}
+
+void testLoadOffsetImm9MaxPlusOne()
+{
+    testLoadWithOffsetImpl(256, 256);
+}
+
+void testLoadOffsetImm9MaxPlusTwo()
+{
+    testLoadWithOffsetImpl(257, 257);
+}
+
+void testLoadOffsetImm9Min()
+{
+    testLoadWithOffsetImpl(-256, -256);
+}
+
+void testLoadOffsetImm9MinMinusOne()
+{
+    testLoadWithOffsetImpl(-257, -257);
+}
+
+void testLoadOffsetScaledUnsignedImm12Max()
+{
+    testLoadWithOffsetImpl(32760, 16380);
+}
+
+void testLoadOffsetScaledUnsignedOverImm12Max()
+{
+    testLoadWithOffsetImpl(32760, 32760);
+    testLoadWithOffsetImpl(32761, 16381);
+    testLoadWithOffsetImpl(32768, 16384);
+}
+
+void testArg(int argument)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+
+    CHECK(compileAndRun(proc, argument) == argument);
+}
+
+void testReturnConst64(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), value));
+
+    CHECK(compileAndRun(proc) == value);
+}
+
+void testReturnVoid()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(proc, Return, Origin());
+    compileAndRun(proc);
+}
+
+void testAddArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), value, value));
+
+    CHECK(compileAndRun(proc, a) == a + a);
+}
+
+void testAddArgs(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun(proc, a, b) == a + b);
+}
+
+void testAddArgImm(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == a + b);
+}
+
+void testAddImmArg(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, b) == a + b);
+}
+
+void testAddArgMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew(proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        load);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int64_t inputOutput = b;
+    CHECK(!compileAndRun(proc, a, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddMemArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew(proc, Add, Origin(),
+        load,
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, &a, b) == a + b);
+}
+
+void testAddImmMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew(proc, Add, Origin(),
+        root->appendNew(proc, Origin(), a),
+        load);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int64_t inputOutput = b;
+    CHECK(!compileAndRun(proc, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), value, value));
+
+    CHECK(compileAndRun(proc, a) == a + a);
+}
+
+void testAddArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == a + b);
+}
+
+void testAddArgMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew(proc, Add, Origin(), argument, load);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int32_t inputOutput = b;
+    CHECK(!compileAndRun(proc, a, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
+void testAddMemArg32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew(proc, Add, Origin(), load, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, &a, b) == a + b);
+}
+
+void testAddImmMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* result = root->appendNew(proc, Add, Origin(),
+        root->appendNew(proc, Origin(), a),
+        load);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int32_t inputOutput = b;
+    CHECK(!compileAndRun(proc, &inputOutput));
+    CHECK(inputOutput == a + b);
+}
+
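+// Adding a zero 32-bit constant must still behave like a real 32-bit op whose result
+// zero-extends (a "ZDef" in Air terms): feeding in 0x0123456789abcdef should return
+// only the low 32 bits, 0x89abcdef.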
+void testAddArgZeroImmZDef()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* constZero = root->appendNew(proc, Origin(), 0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            arg,
+            constZero));
+
+    auto code = compile(proc, 0);
+    CHECK(invoke(*code, 0x0123456789abcdef) == 0x89abcdef);
+}
+
+void testAddLoadTwice()
+{
+    auto test = [&] (unsigned optLevel) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int32_t value = 42;
+        Value* load = root->appendNew(
+            proc, Load, Int32, Origin(),
+            root->appendNew(proc, Origin(), &value));
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(proc, Add, Origin(), load, load));
+
+        auto code = compile(proc, optLevel);
+        CHECK(invoke(*code) == 42 * 2);
+    };
+
+    test(0);
+    test(1);
+}
+
+void testAddArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun(proc, a), a + a));
+}
+
+void testAddArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, a, b), a + b));
+}
+
+void testAddArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, a), a + b));
+}
+
+void testAddImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, b), a + b));
+}
+
+void testAddImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Add, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc), a + b));
+}
+
+void testAddArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Add, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a + a)));
+}
+
+void testAddArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew(proc, Add, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitwise_cast(a + b)));
+}
+
+void testAddFPRArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* argument2 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR1));
+    Value* result = root->appendNew(proc, Add, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, a, b), a + b));
+}
+
+void testAddArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Add, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a + b)));
+}
+
+void testAddImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Add, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(b)), bitwise_cast(a + b)));
+}
+
+void testAddImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew(proc, Origin(), a);
+    Value* constValue2 = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Add, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(a + b)));
+}
+
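+// The next two tests wrap a float Add in FloatToDouble/DoubleToFloat conversions that
+// have no other uses; the optimizer should be able to drop them and add in float, and
+// the float result is the same either way, so the CHECK holds regardless.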
+void testAddArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Add, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a + a)));
+}
+
+void testAddArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew(proc, Add, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitwise_cast(a + b)));
+}
+
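+// Here, by contrast, the double-precision sum is also stored to memory, so the
+// conversions are observable and must be kept; the test checks both the float return
+// value and the stored double side effect.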
+void testAddArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew(proc, Add, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* doubleAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew(proc, Store, Origin(), result, doubleAddress);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b), &effect), bitwise_cast(a + b)));
+    CHECK(isIdentical(effect, static_cast(a) + static_cast(b)));
+}
+
+void testMulArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(
+        proc, Trunc, Origin(), root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Mul, Origin(), value, value));
+
+    CHECK(compileAndRun(proc, a) == a * a);
+}
+
+void testMulArgStore(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    int mulSlot;
+    int valueSlot;
+    
+    Value* value = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* mul = root->appendNew(proc, Mul, Origin(), value, value);
+
+    root->appendNew(
+        proc, Store, Origin(), value,
+        root->appendNew(proc, Origin(), &valueSlot));
+    root->appendNew(
+        proc, Store, Origin(), mul,
+        root->appendNew(proc, Origin(), &mulSlot));
+
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, a));
+    CHECK(mulSlot == a * a);
+    CHECK(valueSlot == a);
+}
+
+void testMulAddArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Mul, Origin(), value, value),
+            value));
+
+    CHECK(compileAndRun(proc, a) == a * a + a);
+}
+
+void testMulArgs(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Mul, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun(proc, a, b) == a * b);
+}
+
+void testMulArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Mul, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == a * b);
+}
+
+void testMulImmArg(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Mul, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, b) == a * b);
+}
+
+void testMulArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Mul, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == a * b);
+}
+
+void testMulLoadTwice()
+{
+    auto test = [&] (unsigned optLevel) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        int32_t value = 42;
+        Value* load = root->appendNew(
+            proc, Load, Int32, Origin(),
+            root->appendNew(proc, Origin(), &value));
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(proc, Mul, Origin(), load, load));
+
+        auto code = compile(proc, optLevel);
+        CHECK(invoke(*code) == 42 * 42);
+    };
+
+    test(0);
+    test(1);
+}
+
+void testMulAddArgsLeft()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew(proc, Add, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke(*code, a.value, b.value, c.value) == a.value * b.value + c.value);
+            }
+        }
+    }
+}
+
+void testMulAddArgsRight()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew(proc, Add, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke(*code, a.value, b.value, c.value) == a.value + b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulAddArgsLeft32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew(proc, Add, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke(*code, a.value, b.value, c.value) == a.value * b.value + c.value);
+            }
+        }
+    }
+}
+
+void testMulAddArgsRight32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Add, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value + b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsLeft()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value * b.value - c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsRight()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int64_t>(*code, a.value, b.value, c.value) == a.value - b.value * c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsLeft32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), multiplied, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value * b.value - c.value);
+            }
+        }
+    }
+}
+
+void testMulSubArgsRight32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg1, arg2);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), arg0, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            for (auto c : testValues) {
+                CHECK(invoke<int32_t>(*code, a.value, b.value, c.value) == a.value - b.value * c.value);
+            }
+        }
+    }
+}
+
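+// testMulNegArgs/testMulNegArgs32 express negation as (0 - (arg0 * arg1)), a shape a backend
+// may lower to a multiply-negate instruction; correctness is checked against -(a * b).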
+void testMulNegArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* zero = root->appendNew<Const64Value>(proc, Origin(), 0);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), zero, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int64Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            CHECK(invoke<int64_t>(*code, a.value, b.value) == -(a.value * b.value));
+        }
+    }
+}
+
+void testMulNegArgs32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg0 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* multiplied = root->appendNew<Value>(proc, Mul, Origin(), arg0, arg1);
+    Value* zero = root->appendNew<Const32Value>(proc, Origin(), 0);
+    Value* added = root->appendNew<Value>(proc, Sub, Origin(), zero, multiplied);
+    root->appendNewControlValue(proc, Return, Origin(), added);
+
+    auto code = compile(proc);
+
+    auto testValues = int32Operands();
+    for (auto a : testValues) {
+        for (auto b : testValues) {
+            CHECK(invoke<int32_t>(*code, a.value, b.value) == -(a.value * b.value));
+        }
+    }
+}
+
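+// The floating-point tests compare results with isIdentical(), i.e. bit-for-bit equality,
+// so the sign of zero and NaN bits must match, not merely compare equal numerically.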
+void testMulArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a * a));
+}
+
+void testMulArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), a * b));
+}
+
+void testMulArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a * b));
+}
+
+void testMulImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, b), a * b));
+}
+
+void testMulImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ConstDoubleValue>(proc, Origin(), a);
+    Value* valueB = root->appendNew<ConstDoubleValue>(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mul, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), a * b));
+}
+
+void testMulArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * a)));
+}
+
+void testMulArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew<ConstFloatValue>(proc, Origin(), a);
+    Value* constValue2 = root->appendNew<ConstFloatValue>(proc, Origin(), b);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc), bitwise_cast<int32_t>(a * b)));
+}
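+// The *WithUselessDoubleConversion tests wrap a single float op in FloatToDouble/DoubleToFloat;
+// since the rounded result is the same, B3 may strip the conversions. The *WithEffectfulDoubleConversion
+// variants also store the double intermediate to memory, so the conversion has to be kept and the
+// stored value is checked separately.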
+
+void testMulArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a)), bitwise_cast<int32_t>(a * a)));
+}
+
+void testMulArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a * b)));
+}
+
+void testMulArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Mul, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* doubleMulress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleMulress);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a * b)));
+    CHECK(isIdentical(effect, static_cast<double>(a) * static_cast<double>(b)));
+}
+
+void testDivArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Div, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun(proc, a), a / a));
+}
+
+void testDivArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), a / b));
+}
+
+void testDivArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, a), a / b));
+}
+
+void testDivImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, b), a / b));
+}
+
+void testDivImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Div, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc), a / b));
+}
+
+void testDivArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Div, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a / a)));
+}
+
+void testDivArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Div, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a / b)));
+}
+
+void testDivArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Div, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a / b)));
+}
+
+void testDivImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Div, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(b)), bitwise_cast(a / b)));
+}
+
+void testDivImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew(proc, Origin(), a);
+    Value* constValue2 = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Div, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(a / b)));
+}
+
+void testModArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Mod, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun(proc, a), fmod(a, a)));
+}
+
+void testModArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a, b), fmod(a, b)));
+}
+
+void testModArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, a), fmod(a, b)));
+}
+
+void testModImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, b), fmod(a, b)));
+}
+
+void testModImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Mod, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc), fmod(a, b)));
+}
+
+void testModArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Mod, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(fmod(a, a)))));
+}
+
+void testModArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(static_cast<float>(fmod(a, b)))));
+}
+
+void testModArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Mod, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(fmod(a, b)))));
+}
+
+void testModImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Mod, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(b)), bitwise_cast(static_cast(fmod(a, b)))));
+}
+
+void testModImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew(proc, Origin(), a);
+    Value* constValue2 = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Mod, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(static_cast(fmod(a, b)))));
+}
+
+void testDivArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Div, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a / a)));
+}
+
+void testDivArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew(proc, Div, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitwise_cast(a / b)));
+}
+
+void testDivArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew(proc, Div, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* doubleDivress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew(proc, Store, Origin(), result, doubleDivress);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b), &effect), bitwise_cast(a / b)));
+    CHECK(isIdentical(effect, static_cast(a) / static_cast(b)));
+}
+
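+// UDiv/UMod are unsigned; a zero denominator is not valid input, so those cases are skipped
+// and the remaining results are compared against the C unsigned operators.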
+void testUDivArgsInt32(uint32_t a, uint32_t b)
+{
+    // UDiv with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, UDiv, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint32_t>(proc, a, b), a / b);
+}
+
+void testUDivArgsInt64(uint64_t a, uint64_t b)
+{
+    // UDiv with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, UDiv, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint64_t>(proc, a, b), a / b);
+}
+
+void testUModArgsInt32(uint32_t a, uint32_t b)
+{
+    // UMod with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, UMod, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint32_t>(proc, a, b), a % b);
+}
+
+void testUModArgsInt64(uint64_t a, uint64_t b)
+{
+    // UMod with denominator == 0 is invalid.
+    if (!b)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, UMod, Origin(), argument1, argument2);
+    root->appendNew<Value>(proc, Return, Origin(), result);
+
+    CHECK_EQ(compileAndRun<uint64_t>(proc, a, b), a % b);
+}
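+// The Sub tests cover register, immediate, and memory operand combinations at both 64-bit and
+// 32-bit widths; the Mem variants store the result back through the pointer and return 0, so the
+// check reads the memory cell afterwards.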
+
+void testSubArg(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), value, value));
+
+    CHECK(!compileAndRun<int>(proc, a));
+}
+
+void testSubArgs(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a - b);
+}
+
+void testSubArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == a - b);
+}
+
+void testNegValueSubOne(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* negArgument = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<Const64Value>(proc, Origin(), 0),
+        argument);
+    Value* negArgumentMinusOne = root->appendNew<Value>(proc, Sub, Origin(),
+        negArgument,
+        root->appendNew<Const64Value>(proc, Origin(), 1));
+    root->appendNewControlValue(proc, Return, Origin(), negArgumentMinusOne);
+    CHECK(compileAndRun<int>(proc, a) == -a - 1);
+}
+
+void testSubImmArg(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<int>(proc, b) == a - b);
+}
+
+void testSubArgMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        load);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, a, &b) == a - b);
+}
+
+void testSubMemArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(),
+        load,
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    int64_t inputOutput = a;
+    CHECK(!compileAndRun<int64_t>(proc, &inputOutput, b));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubImmMem(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew(proc, Sub, Origin(),
+        root->appendNew(proc, Origin(), a),
+        load);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int64_t inputOutput = b;
+    CHECK(!compileAndRun(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubMemImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* result = root->appendNew(proc, Sub, Origin(),
+        load,
+        root->appendNew(proc, Origin(), b));
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int64_t inputOutput = a;
+    CHECK(!compileAndRun(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+
+void testSubArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a - b);
+}
+
+void testSubArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == a - b);
+}
+
+void testSubImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun(proc, b) == a - b);
+}
+
+void testSubMemArg32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew(proc, Sub, Origin(), load, argument);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int32_t inputOutput = a;
+    CHECK(!compileAndRun(proc, &inputOutput, b));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubArgMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew(proc, Sub, Origin(), argument, load);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, a, &b) == a - b);
+}
+
+void testSubImmMem32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* result = root->appendNew(proc, Sub, Origin(),
+        root->appendNew(proc, Origin(), a),
+        load);
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int32_t inputOutput = b;
+    CHECK(!compileAndRun(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testSubMemImm32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* result = root->appendNew(proc, Sub, Origin(),
+        load,
+        root->appendNew(proc, Origin(), b));
+    root->appendNew(proc, Store, Origin(), result, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int32_t inputOutput = a;
+    CHECK(!compileAndRun(proc, &inputOutput));
+    CHECK(inputOutput == a - b);
+}
+
+void testNegValueSubOne32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* negArgument = root->appendNew(proc, Sub, Origin(),
+        root->appendNew(proc, Origin(), 0),
+        argument);
+    Value* negArgumentMinusOne = root->appendNew(proc, Sub, Origin(),
+        negArgument,
+        root->appendNew(proc, Origin(), 1));
+    root->appendNewControlValue(proc, Return, Origin(), negArgumentMinusOne);
+    CHECK(compileAndRun(proc, a) == -a - 1);
+}
+
+void testSubArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), value, value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), a - a));
+}
+
+void testSubArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, a, b), a - b));
+}
+
+void testSubArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, a), a - b));
+}
+
+void testSubImmArgDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Sub, Origin(), valueA, valueB));
+
+    CHECK(isIdentical(compileAndRun(proc, b), a - b));
+}
+
+void testSubImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* valueA = root->appendNew(proc, Origin(), a);
+    Value* valueB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Sub, Origin(), valueA, valueB));
+    
+    CHECK(isIdentical(compileAndRun(proc), a - b));
+}
+
+void testSubArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Sub, Origin(), floatValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a - a)));
+}
+
+void testSubArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), floatValue1, floatValue2);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), bitwise_cast<int32_t>(a - b)));
+}
+
+void testSubArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Sub, Origin(), floatValue, constValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a - b)));
+}
+
+void testSubImmArgFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* constValue = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Sub, Origin(), constValue, floatValue);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(b)), bitwise_cast(a - b)));
+}
+
+void testSubImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* constValue1 = root->appendNew(proc, Origin(), a);
+    Value* constValue2 = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, Sub, Origin(), constValue1, constValue2);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(a - b)));
+}
+
+void testSubArgFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Sub, Origin(), asDouble, asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(a - a)));
+}
+
+void testSubArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew(proc, Sub, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitwise_cast(a - b)));
+}
+
+void testSubArgsFloatWithEffectfulDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* asDouble1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* asDouble2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+    Value* result = root->appendNew<Value>(proc, Sub, Origin(), asDouble1, asDouble2);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    Value* doubleSubress = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), result, doubleSubress);
+    Value* result32 = root->appendNew<Value>(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), &effect), bitwise_cast<int32_t>(a - b)));
+    CHECK(isIdentical(effect, static_cast<double>(a) - static_cast<double>(b)));
+}
+
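+// Instead of executing code, this test lowers the procedure to Air and scans the first block for
+// exactly one instruction with the expected opcode and three Tmp arguments, i.e. a ternary Sub.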
+void testTernarySubInstructionSelection(B3::Opcode valueModifier, Type valueType, Air::Opcode expectedOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* right = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+
+    if (valueModifier == Trunc) {
+        left = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), left);
+        right = root->appendNew<Value>(proc, valueModifier, valueType, Origin(), right);
+    }
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Sub, Origin(), left, right));
+
+    lowerToAirForTesting(proc);
+
+    auto block = proc.code()[0];
+    unsigned numberOfSubInstructions = 0;
+    for (auto instruction : *block) {
+        if (instruction.kind.opcode == expectedOpcode) {
+            CHECK_EQ(instruction.args.size(), 3ul);
+            CHECK_EQ(instruction.args[0].kind(), Air::Arg::Tmp);
+            CHECK_EQ(instruction.args[1].kind(), Air::Arg::Tmp);
+            CHECK_EQ(instruction.args[2].kind(), Air::Arg::Tmp);
+            numberOfSubInstructions++;
+        }
+    }
+    CHECK_EQ(numberOfSubInstructions, 1ul);
+}
+
+void testNegDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Neg, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun<double>(proc, a), -a));
+}
+
+void testNegFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument32);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, Neg, Origin(), floatValue));
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), -a));
+}
+
+void testNegFloatWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentInt32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew<Value>(proc, BitwiseCast, Origin(), argumentInt32);
+    Value* asDouble = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew<Value>(proc, Neg, Origin(), asDouble);
+    Value* floatResult = root->appendNew<Value>(proc, DoubleToFloat, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a)), -a));
+}
+
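+// The BitAnd tests mirror the Add/Sub structure: argument/immediate combinations, nested BitAnds,
+// and 32-bit variants built with Trunc.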
+void testBitAndArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int64_t>(proc, a, b) == (a & b));
+}
+
+void testBitAndSameArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun(proc, a) == a);
+}
+
+void testBitAndImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), a),
+            root->appendNew<Const64Value>(proc, Origin(), b)));
+
+    CHECK(compileAndRun<int64_t>(proc) == (a & b));
+}
+
+void testBitAndArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a & b));
+}
+
+void testBitAndImmArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, b) == (a & b));
+}
+
+void testBitAndBitAndArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew<Value>(
+        proc, BitAnd, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew<Const64Value>(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            innerBitAnd,
+            root->appendNew<Const64Value>(proc, Origin(), c)));
+
+    CHECK(compileAndRun<int64_t>(proc, a) == ((a & b) & c));
+}
+
+void testBitAndImmBitAndArgImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew(
+        proc, BitAnd, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), a),
+            innerBitAnd));
+
+    CHECK(compileAndRun(proc, b) == (a & (b & c)));
+}
+
+void testBitAndArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun<int>(proc, a, b) == (a & b));
+}
+
+void testBitAndSameArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun(proc, a) == a);
+}
+
+void testBitAndImms32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a & b));
+}
+
+void testBitAndArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a & b));
+}
+
+void testBitAndImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun(proc, b) == (a & b));
+}
+
+void testBitAndBitAndArgImmImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew(
+        proc, BitAnd, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            innerBitAnd,
+            root->appendNew(proc, Origin(), c)));
+
+    CHECK(compileAndRun(proc, a) == ((a & b) & c));
+}
+
+void testBitAndImmBitAndArgImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitAnd = root->appendNew(
+        proc, BitAnd, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), a),
+            innerBitAnd));
+
+    CHECK(compileAndRun(proc, b) == (a & (b & c)));
+}
+
+void testBitAndWithMaskReturnsBooleans(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg0 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* equal = root->appendNew(proc, Equal, Origin(), arg0, arg1);
+    Value* maskedEqual = root->appendNew(proc, BitAnd, Origin(),
+        root->appendNew(proc, Origin(), 0x5),
+        equal);
+    Value* inverted = root->appendNew(proc, BitXor, Origin(),
+        root->appendNew(proc, Origin(), 0x1),
+        maskedEqual);
+    Value* select = root->appendNew(proc, Select, Origin(), inverted,
+        root->appendNew(proc, Origin(), 42),
+        root->appendNew(proc, Origin(), -5));
+
+    root->appendNewControlValue(proc, Return, Origin(), select);
+
+    int64_t expected = (a == b) ? -5 : 42;
+    CHECK(compileAndRun(proc, a, b) == expected);
+}
+
+double bitAndDouble(double a, double b)
+{
+    return bitwise_cast(bitwise_cast(a) & bitwise_cast(b));
+}
+
+void testBitAndArgDouble(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, a), bitAndDouble(a, a)));
+}
+
+void testBitAndArgsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, a, b), bitAndDouble(a, b)));
+}
+
+void testBitAndArgImmDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, a, b), bitAndDouble(a, b)));
+}
+
+void testBitAndImmsDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, Origin(), a);
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc), bitAndDouble(a, b)));
+}
+
+float bitAndFloat(float a, float b)
+{
+    return bitwise_cast(bitwise_cast(a) & bitwise_cast(b));
+}
+
+void testBitAndArgFloat(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(),
+        root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitAndFloat(a, a)));
+}
+
+void testBitAndArgsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, BitwiseCast, Origin(),
+        root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew(proc, BitwiseCast, Origin(),
+        root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitAndFloat(a, b)));
+}
+
+void testBitAndArgImmFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, BitwiseCast, Origin(),
+        root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), bitAndFloat(a, b)));
+}
+
+void testBitAndImmsFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, Origin(), a);
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+    Value* result = root->appendNew(proc, BitAnd, Origin(), argumentA, argumentB);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(isIdentical(compileAndRun(proc), bitAndFloat(a, b)));
+}
+
+void testBitAndArgsFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentA = root->appendNew(proc, BitwiseCast, Origin(),
+        root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+    Value* argumentB = root->appendNew(proc, BitwiseCast, Origin(),
+        root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* argumentAasDouble = root->appendNew(proc, FloatToDouble, Origin(), argumentA);
+    Value* argumentBasDouble = root->appendNew(proc, FloatToDouble, Origin(), argumentB);
+    Value* doubleResult = root->appendNew(proc, BitAnd, Origin(), argumentAasDouble, argumentBasDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), doubleResult);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    double doubleA = a;
+    double doubleB = b;
+    float expected = static_cast(bitAndDouble(doubleA, doubleB));
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)), expected));
+}
+
+void testBitOrArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun(proc, a, b) == (a | b));
+}
+
+void testBitOrSameArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun(proc, a) == a);
+}
+
+void testBitOrImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a | b));
+}
+
+void testBitOrArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a | b));
+}
+
+void testBitOrImmArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, b) == (a | b));
+}
+
+void testBitOrBitOrArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew(
+        proc, BitOr, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            innerBitOr,
+            root->appendNew(proc, Origin(), c)));
+
+    CHECK(compileAndRun(proc, a) == ((a | b) | c));
+}
+
+void testBitOrImmBitOrArgImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew(
+        proc, BitOr, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            innerBitOr));
+
+    CHECK(compileAndRun(proc, b) == (a | (b | c)));
+}
+
+void testBitOrArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a | b));
+}
+
+void testBitOrSameArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(
+        proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            argument,
+            argument));
+
+    CHECK(compileAndRun(proc, a) == a);
+}
+
+void testBitOrImms32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a | b));
+}
+
+void testBitOrArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a | b));
+}
+
+void testBitOrImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun(proc, b) == (a | b));
+}
+
+void testBitOrBitOrArgImmImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew(
+        proc, BitOr, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            innerBitOr,
+            root->appendNew(proc, Origin(), c)));
+
+    CHECK(compileAndRun(proc, a) == ((a | b) | c));
+}
+
+void testBitOrImmBitOrArgImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitOr = root->appendNew(
+        proc, BitOr, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitOr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            innerBitOr));
+
+    CHECK(compileAndRun(proc, b) == (a | (b | c)));
+}
+
+void testBitXorArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun(proc, a, b) == (a ^ b));
+}
+
+void testBitXorSameArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            argument,
+            argument));
+
+    CHECK(!compileAndRun(proc, a));
+}
+
+void testBitXorImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a ^ b));
+}
+
+void testBitXorArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a ^ b));
+}
+
+void testBitXorImmArg(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, b) == (a ^ b));
+}
+
+void testBitXorBitXorArgImmImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew(
+        proc, BitXor, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            innerBitXor,
+            root->appendNew(proc, Origin(), c)));
+
+    CHECK(compileAndRun(proc, a) == ((a ^ b) ^ c));
+}
+
+void testBitXorImmBitXorArgImm(int64_t a, int64_t b, int64_t c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew(
+        proc, BitXor, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), a),
+            innerBitXor));
+
+    CHECK(compileAndRun(proc, b) == (a ^ (b ^ c)));
+}
+
+void testBitXorArgs32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a ^ b));
+}
+
+void testBitXorSameArg32(int a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(
+        proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            argument,
+            argument));
+
+    CHECK(!compileAndRun(proc, a));
+}
+
+void testBitXorImms32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a ^ b));
+}
+
+void testBitXorArgImm32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a ^ b));
+}
+
+void testBitXorImmArg32(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun(proc, b) == (a ^ b));
+}
+
+void testBitXorBitXorArgImmImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew(
+        proc, BitXor, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), b));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            innerBitXor,
+            root->appendNew(proc, Origin(), c)));
+
+    CHECK(compileAndRun(proc, a) == ((a ^ b) ^ c));
+}
+
+void testBitXorImmBitXorArgImm32(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* innerBitXor = root->appendNew(
+        proc, BitXor, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), c));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), a),
+            innerBitXor));
+
+    CHECK(compileAndRun(proc, b) == (a ^ (b ^ c)));
+}
+
+void testBitNotArg(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), -1),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), static_cast((static_cast(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotImm(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), -1),
+            root->appendNew(proc, Origin(), a)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), static_cast((static_cast(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotMem(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* notLoad = root->appendNew(proc, BitXor, Origin(),
+        root->appendNew(proc, Origin(), -1),
+        load);
+    root->appendNew(proc, Store, Origin(), notLoad, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int64_t input = a;
+    compileAndRun(proc, &input);
+    CHECK(isIdentical(input, static_cast((static_cast(a) ^ 0xffffffffffffffff))));
+}
+
+void testBitNotArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), -1),
+            argument));
+    CHECK(isIdentical(compileAndRun(proc, a), static_cast((static_cast(a) ^ 0xffffffff))));
+}
+
+void testBitNotImm32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitXor, Origin(),
+            root->appendNew(proc, Origin(), -1),
+            root->appendNew(proc, Origin(), a)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), static_cast((static_cast(a) ^ 0xffffffff))));
+}
+
+void testBitNotMem32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* notLoad = root->appendNew(proc, BitXor, Origin(),
+        root->appendNew(proc, Origin(), -1),
+        load);
+    root->appendNew(proc, Store, Origin(), notLoad, address);
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    int32_t input = a;
+    compileAndRun(proc, &input);
+    CHECK(isIdentical(input, static_cast((static_cast(a) ^ 0xffffffff))));
+}
+
+void testBitNotOnBooleanAndBranch32(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* argsAreEqual = root->appendNew(proc, Equal, Origin(), arg1, arg2);
+    Value* argsAreNotEqual = root->appendNew(proc, BitXor, Origin(),
+        root->appendNew(proc, Origin(), -1),
+        argsAreEqual);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        argsAreNotEqual,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -42));
+
+    int32_t expectedValue = (a != b) ? 42 : -42;
+    CHECK(compileAndRun(proc, a, b) == expectedValue);
+}
+
+void testShlArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a << b));
+}
+
+void testShlImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a << b));
+}
+
+void testShlArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a << b));
+}
+
+void testShlArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Shl, Origin(), value, value));
+
+    CHECK(compileAndRun(proc, a) == (a << a));
+}
+
+void testShlArgs32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a << b));
+}
+
+void testShlImms32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a << b));
+}
+
+void testShlArgImm32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a << b));
+}
+
+void testSShrArgs(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a >> b));
+}
+
+void testSShrImms(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a >> b));
+}
+
+void testSShrArgImm(int64_t a, int64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a >> b));
+}
+
+void testSShrArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, SShr, Origin(), value, value));
+
+    CHECK(compileAndRun(proc, a) == (a >> (a & 31)));
+}
+
+void testSShrArgs32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a >> b));
+}
+
+void testSShrImms32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a >> b));
+}
+
+void testSShrArgImm32(int32_t a, int32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a >> b));
+}
+
+void testZShrArgs(uint64_t a, uint64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a >> b));
+}
+
+void testZShrImms(uint64_t a, uint64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a >> b));
+}
+
+void testZShrArgImm(uint64_t a, uint64_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a >> b));
+}
+
+void testZShrArg32(uint32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, ZShr, Origin(), value, value));
+
+    CHECK(compileAndRun(proc, a) == (a >> (a & 31)));
+}
+
+void testZShrArgs32(uint32_t a, uint32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+
+    CHECK(compileAndRun(proc, a, b) == (a >> b));
+}
+
+void testZShrImms32(uint32_t a, uint32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(),
+            root->appendNew(proc, Origin(), a),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc) == (a >> b));
+}
+
+void testZShrArgImm32(uint32_t a, uint32_t b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), b)));
+
+    CHECK(compileAndRun(proc, a) == (a >> b));
+}
+
+template<typename IntegerType>
+static unsigned countLeadingZero(IntegerType value)
+{
+    unsigned bitCount = sizeof(IntegerType) * 8;
+    if (!value)
+        return bitCount;
+
+    unsigned counter = 0;
+    while (!(static_cast<uint64_t>(value) & (1l << (bitCount - 1)))) {
+        value <<= 1;
+        ++counter;
+    }
+    return counter;
+}
+
+void testClzArg64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* clzValue = root->appendNew(proc, Clz, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun(proc, a) == countLeadingZero(a));
+}
+
+void testClzMem64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* value = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* clzValue = root->appendNew(proc, Clz, Origin(), value);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun(proc, &a) == countLeadingZero(a));
+}
+
+void testClzArg32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* clzValue = root->appendNew(proc, Clz, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun(proc, a) == countLeadingZero(a));
+}
+
+void testClzMem32(int32_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* value = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* clzValue = root->appendNew(proc, Clz, Origin(), value);
+    root->appendNewControlValue(proc, Return, Origin(), clzValue);
+    CHECK(compileAndRun(proc, &a) == countLeadingZero(a));
+}
+
+void testAbsArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Abs, Origin(),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), fabs(a)));
+}
+
+void testAbsImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Abs, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), fabs(a)));
+}
+
+void testAbsMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Abs, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, &a), fabs(a)));
+}
+
+void testAbsAbsArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstAbs = root->appendNew(proc, Abs, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* secondAbs = root->appendNew(proc, Abs, Origin(), firstAbs);
+    root->appendNewControlValue(proc, Return, Origin(), secondAbs);
+
+    CHECK(isIdentical(compileAndRun(proc, a), fabs(a)));
+}
+
+void testAbsBitwiseCastArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt64 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew(proc, BitwiseCast, Origin(), argumentAsInt64);
+    Value* absValue = root->appendNew(proc, Abs, Origin(), argumentAsDouble);
+    root->appendNewControlValue(proc, Return, Origin(), absValue);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), fabs(a)));
+}
+
+void testBitwiseCastAbsBitwiseCastArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt64 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew(proc, BitwiseCast, Origin(), argumentAsInt64);
+    Value* absValue = root->appendNew(proc, Abs, Origin(), argumentAsDouble);
+    Value* resultAsInt64 = root->appendNew(proc, BitwiseCast, Origin(), absValue);
+
+    root->appendNewControlValue(proc, Return, Origin(), resultAsInt64);
+
+    int64_t expectedResult = bitwise_cast(fabs(a));
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), expectedResult));
+}
+
+void testAbsArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Abs, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(fabs(a)))));
+}
+
+void testAbsImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Abs, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(fabs(a)))));
+}
+
+void testAbsMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew(proc, Abs, Origin(), loadFloat);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, &a), bitwise_cast(static_cast(fabs(a)))));
+}
+
+void testAbsAbsArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* firstAbs = root->appendNew(proc, Abs, Origin(), argument);
+    Value* secondAbs = root->appendNew(proc, Abs, Origin(), firstAbs);
+    root->appendNewControlValue(proc, Return, Origin(), secondAbs);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), static_cast(fabs(a))));
+}
+
+void testAbsBitwiseCastArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsfloat = root->appendNew(proc, BitwiseCast, Origin(), argumentAsInt32);
+    Value* absValue = root->appendNew(proc, Abs, Origin(), argumentAsfloat);
+    root->appendNewControlValue(proc, Return, Origin(), absValue);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), static_cast(fabs(a))));
+}
+
+void testBitwiseCastAbsBitwiseCastArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argumentAsInt32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsfloat = root->appendNew(proc, BitwiseCast, Origin(), argumentAsInt32);
+    Value* absValue = root->appendNew(proc, Abs, Origin(), argumentAsfloat);
+    Value* resultAsInt64 = root->appendNew(proc, BitwiseCast, Origin(), absValue);
+
+    root->appendNewControlValue(proc, Return, Origin(), resultAsInt64);
+
+    int32_t expectedResult = bitwise_cast(static_cast(fabs(a)));
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), expectedResult));
+}
+
+void testAbsArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Abs, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(fabs(a)))));
+}
+
+void testAbsArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Abs, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun(proc, bitwise_cast(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast(static_cast(fabs(a)))));
+    CHECK(isIdentical(effect, static_cast(fabs(a))));
+}
+
+void testCeilArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Ceil, Origin(),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), ceil(a)));
+}
+
+void testCeilImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Ceil, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), ceil(a)));
+}
+
+void testCeilMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Ceil, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, &a), ceil(a)));
+}
+
+void testCeilCeilArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstCeil = root->appendNew(proc, Ceil, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* secondCeil = root->appendNew(proc, Ceil, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), secondCeil);
+
+    CHECK(isIdentical(compileAndRun(proc, a), ceil(a)));
+}
+
+void testFloorCeilArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstCeil = root->appendNew(proc, Ceil, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* wrappingFloor = root->appendNew(proc, Floor, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingFloor);
+
+    CHECK(isIdentical(compileAndRun(proc, a), ceil(a)));
+}
+
+void testCeilIToD64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Ceil, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, a), ceil(static_cast(a))));
+}
+
+void testCeilIToD32(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsDouble = root->appendNew(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Ceil, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, a), ceil(static_cast(a))));
+}
+
+void testCeilArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Ceil, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(ceilf(a))));
+}
+
+void testCeilImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Ceil, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(ceilf(a))));
+}
+
+void testCeilMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew(proc, Ceil, Origin(), loadFloat);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, &a), bitwise_cast(ceilf(a))));
+}
+
+void testCeilCeilArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* firstCeil = root->appendNew(proc, Ceil, Origin(), argument);
+    Value* secondCeil = root->appendNew(proc, Ceil, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), secondCeil);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), ceilf(a)));
+}
+
+void testFloorCeilArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* firstCeil = root->appendNew(proc, Ceil, Origin(), argument);
+    Value* wrappingFloor = root->appendNew(proc, Floor, Origin(), firstCeil);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingFloor);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), ceilf(a)));
+}
+
+void testCeilArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Ceil, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(ceilf(a))));
+}
+
+void testCeilArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Ceil, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun(proc, bitwise_cast(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast(ceilf(a))));
+    CHECK(isIdentical(effect, static_cast(ceilf(a))));
+}
+
+void testFloorArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Floor, Origin(),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), floor(a)));
+}
+
+void testFloorImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Floor, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), floor(a)));
+}
+
+void testFloorMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Floor, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, &a), floor(a)));
+}
+
+void testFloorFloorArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstFloor = root->appendNew(proc, Floor, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* secondFloor = root->appendNew(proc, Floor, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), secondFloor);
+
+    CHECK(isIdentical(compileAndRun(proc, a), floor(a)));
+}
+
+void testCeilFloorArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* firstFloor = root->appendNew(proc, Floor, Origin(),
+        root->appendNew(proc, Origin(), FPRInfo::argumentFPR0));
+    Value* wrappingCeil = root->appendNew(proc, Ceil, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingCeil);
+
+    CHECK(isIdentical(compileAndRun(proc, a), floor(a)));
+}
+
+void testFloorIToD64(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argumentAsDouble = root->appendNew(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Floor, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, a), floor(static_cast(a))));
+}
+
+void testFloorIToD32(int64_t a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentAsDouble = root->appendNew(proc, IToD, Origin(), argument);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Floor, Origin(), argumentAsDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, a), floor(static_cast(a))));
+}
+
+void testFloorArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Floor, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(floorf(a))));
+}
+
+void testFloorImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Floor, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(floorf(a))));
+}
+
+void testFloorMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew(proc, Floor, Origin(), loadFloat);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, &a), bitwise_cast(floorf(a))));
+}
+
+void testFloorFloorArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* firstFloor = root->appendNew(proc, Floor, Origin(), argument);
+    Value* secondFloor = root->appendNew(proc, Floor, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), secondFloor);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), floorf(a)));
+}
+
+void testCeilFloorArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* firstFloor = root->appendNew(proc, Floor, Origin(), argument);
+    Value* wrappingCeil = root->appendNew(proc, Ceil, Origin(), firstFloor);
+    root->appendNewControlValue(proc, Return, Origin(), wrappingCeil);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), floorf(a)));
+}
+
+void testFloorArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Floor, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(floorf(a))));
+}
+
+void testFloorArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Floor, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun(proc, bitwise_cast(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast(floorf(a))));
+    CHECK(isIdentical(effect, static_cast(floorf(a))));
+}
+
+void testSqrtArg(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Sqrt, Origin(),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR0)));
+
+    CHECK(isIdentical(compileAndRun(proc, a), sqrt(a)));
+}
+
+void testSqrtImm(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Sqrt, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), sqrt(a)));
+}
+
+void testSqrtMem(double a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Sqrt, Origin(), loadDouble));
+
+    CHECK(isIdentical(compileAndRun(proc, &a), sqrt(a)));
+}
+
+void testSqrtArg(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* result = root->appendNew(proc, Sqrt, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(sqrt(a)))));
+}
+
+void testSqrtImm(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), a);
+    Value* result = root->appendNew(proc, Sqrt, Origin(), argument);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(sqrt(a)))));
+}
+
+void testSqrtMem(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* result = root->appendNew(proc, Sqrt, Origin(), loadFloat);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), result);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, &a), bitwise_cast(static_cast(sqrt(a)))));
+}
+
+void testSqrtArgWithUselessDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Sqrt, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a)), bitwise_cast(static_cast(sqrt(a)))));
+}
+
+void testSqrtArgWithEffectfulDoubleConversion(float a)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* result = root->appendNew(proc, Sqrt, Origin(), asDouble);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), result);
+    Value* result32 = root->appendNew(proc, BitwiseCast, Origin(), floatResult);
+    Value* doubleAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew(proc, Store, Origin(), result, doubleAddress);
+    root->appendNewControlValue(proc, Return, Origin(), result32);
+
+    double effect = 0;
+    int32_t resultValue = compileAndRun(proc, bitwise_cast(a), &effect);
+    CHECK(isIdentical(resultValue, bitwise_cast(static_cast(sqrt(a)))));
+    CHECK(isIdentical(effect, static_cast(sqrt(a))));
+}
+
+void testCompareTwoFloatToDouble(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg1As32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1Float = root->appendNew(proc, BitwiseCast, Origin(), arg1As32);
+    Value* arg1AsDouble = root->appendNew(proc, FloatToDouble, Origin(), arg1Float);
+
+    Value* arg2As32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg2Float = root->appendNew(proc, BitwiseCast, Origin(), arg2As32);
+    Value* arg2AsDouble = root->appendNew(proc, FloatToDouble, Origin(), arg2Float);
+    Value* equal = root->appendNew(proc, Equal, Origin(), arg1AsDouble, arg2AsDouble);
+
+    root->appendNewControlValue(proc, Return, Origin(), equal);
+
+    CHECK(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b)) == (a == b));
+}
+
+void testCompareOneFloatToDouble(float a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg1As32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg1Float = root->appendNew(proc, BitwiseCast, Origin(), arg1As32);
+    Value* arg1AsDouble = root->appendNew(proc, FloatToDouble, Origin(), arg1Float);
+
+    Value* arg2AsDouble = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* equal = root->appendNew(proc, Equal, Origin(), arg1AsDouble, arg2AsDouble);
+
+    root->appendNewControlValue(proc, Return, Origin(), equal);
+
+    CHECK(compileAndRun(proc, bitwise_cast(a), b) == (a == b));
+}
+
+void testCompareFloatToDoubleThroughPhi(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    Value* arg1As32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg1Float = root->appendNew(proc, BitwiseCast, Origin(), arg1As32);
+    Value* arg1AsDouble = root->appendNew(proc, FloatToDouble, Origin(), arg1Float);
+
+    Value* arg2AsDouble = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* arg2AsFloat = root->appendNew(proc, DoubleToFloat, Origin(), arg2AsDouble);
+    Value* arg2AsFRoundedDouble = root->appendNew(proc, FloatToDouble, Origin(), arg2AsFloat);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenValue = thenCase->appendNew(proc, Origin(), arg1AsDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* elseConst = elseCase->appendNew(proc, Origin(), 0.);
+    UpsilonValue* elseValue = elseCase->appendNew(proc, Origin(), elseConst);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+    Value* equal = tail->appendNew(proc, Equal, Origin(), doubleInput, arg2AsFRoundedDouble);
+    tail->appendNewControlValue(proc, Return, Origin(), equal);
+
+    auto code = compile(proc);
+    int32_t integerA = bitwise_cast(a);
+    double doubleB = b;
+    CHECK(invoke(*code, 1, integerA, doubleB) == (a == b));
+    CHECK(invoke(*code, 0, integerA, doubleB) == (b == 0));
+}
+
+void testDoubleToFloatThroughPhi(float value)
+{
+    // Simple case of:
+    //     if (a)
+    //         x = DoubleAdd(a, b)
+    //     else
+    //         x = DoubleAdd(a, c)
+    //     DoubleToFloat(x)
+    //
+    // Both Adds can be converted to float adds.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* argAsDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* positiveConst = thenCase->appendNew<ConstDoubleValue>(proc, Origin(), 42.5f);
+    Value* thenAdd = thenCase->appendNew<Value>(proc, Add, Origin(), argAsDouble, positiveConst);
+    UpsilonValue* thenValue = thenCase->appendNew(proc, Origin(), thenAdd);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* elseConst = elseCase->appendNew(proc, Origin(), M_PI);
+    UpsilonValue* elseValue = elseCase->appendNew(proc, Origin(), elseConst);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+    Value* floatResult = tail->appendNew(proc, DoubleToFloat, Origin(), doubleInput);
+    tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke(*code, 1, bitwise_cast(value)), value + 42.5f));
+    CHECK(isIdentical(invoke(*code, 0, bitwise_cast(value)), static_cast(M_PI)));
+}
+
+void testReduceFloatToDoubleValidates()
+{
+    // Simple case of:
+    //     f = DoubleToFloat(Bitcast(argGPR0))
+    //     if (a)
+    //         x = FloatConst()
+    //     else
+    //         x = FloatConst()
+    //     p = Phi(x)
+    //     a = Mul(p, p)
+    //     b = Add(a, f)
+    //     c = Add(p, b)
+    //     Return(c)
+    //
+    // This should not crash in the validator after ReduceFloatToDouble.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* thingy = root->appendNew(proc, BitwiseCast, Origin(), condition);
+    thingy = root->appendNew(proc, DoubleToFloat, Origin(), thingy); // Make the phase think it has work to do.
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenValue = thenCase->appendNew(proc, Origin(),
+        thenCase->appendNew(proc, Origin(), 11.5));
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    UpsilonValue* elseValue = elseCase->appendNew(proc, Origin(), 
+        elseCase->appendNew(proc, Origin(), 10.5));
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* phi =  tail->appendNew(proc, Phi, Float, Origin());
+    thenValue->setPhi(phi);
+    elseValue->setPhi(phi);
+    Value* result = tail->appendNew(proc, Mul, Origin(), 
+            phi, phi);
+    result = tail->appendNew(proc, Add, Origin(), 
+            result,
+            thingy);
+    result = tail->appendNew(proc, Add, Origin(), 
+            phi,
+            result);
+    tail->appendNewControlValue(proc, Return, Origin(), result);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke(*code, 1), 11.5f * 11.5f + static_cast(bitwise_cast(static_cast(1))) + 11.5f));
+    CHECK(isIdentical(invoke(*code, 0), 10.5f * 10.5f + static_cast(bitwise_cast(static_cast(0))) + 10.5f));
+}
+
+void testDoubleProducerPhiToFloatConversion(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* asDouble = thenCase->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    UpsilonValue* thenValue = thenCase->appendNew(proc, Origin(), asDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* constDouble = elseCase->appendNew(proc, Origin(), 42.5);
+    UpsilonValue* elseValue = elseCase->appendNew(proc, Origin(), constDouble);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+
+    Value* argAsDoubleAgain = tail->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* finalAdd = tail->appendNew(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+    Value* floatResult = tail->appendNew(proc, DoubleToFloat, Origin(), finalAdd);
+    tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke(*code, 1, bitwise_cast(value)), value + value));
+    CHECK(isIdentical(invoke(*code, 0, bitwise_cast(value)), 42.5f + value));
+}
+
+void testDoubleProducerPhiToFloatConversionWithDoubleConsumer(float value)
+{
+    // In this case, the Upsilon-Phi effectively contains a Float value, but it is used
+    // as a Float and as a Double.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* asDouble = thenCase->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    UpsilonValue* thenValue = thenCase->appendNew(proc, Origin(), asDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* constDouble = elseCase->appendNew(proc, Origin(), 42.5);
+    UpsilonValue* elseValue = elseCase->appendNew(proc, Origin(), constDouble);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+
+    Value* argAsDoubleAgain = tail->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* floatAdd = tail->appendNew(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+
+    // FRound.
+    Value* floatResult = tail->appendNew(proc, DoubleToFloat, Origin(), floatAdd);
+    Value* doubleResult = tail->appendNew(proc, FloatToDouble, Origin(), floatResult);
+
+    // This one *cannot* be eliminated
+    Value* doubleAdd = tail->appendNew(proc, Add, Origin(), doubleInput, doubleResult);
+
+    tail->appendNewControlValue(proc, Return, Origin(), doubleAdd);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke(*code, 1, bitwise_cast(value)), (value + value) + static_cast(value)));
+    CHECK(isIdentical(invoke(*code, 0, bitwise_cast(value)), static_cast((42.5f + value) + 42.5f)));
+}
+
+void testDoubleProducerPhiWithNonFloatConst(float value, double constValue)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        condition,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* asDouble = thenCase->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    UpsilonValue* thenValue = thenCase->appendNew(proc, Origin(), asDouble);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* constDouble = elseCase->appendNew(proc, Origin(), constValue);
+    UpsilonValue* elseValue = elseCase->appendNew(proc, Origin(), constDouble);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    Value* doubleInput = tail->appendNew(proc, Phi, Double, Origin());
+    thenValue->setPhi(doubleInput);
+    elseValue->setPhi(doubleInput);
+
+    Value* argAsDoubleAgain = tail->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    Value* finalAdd = tail->appendNew(proc, Add, Origin(), doubleInput, argAsDoubleAgain);
+    Value* floatResult = tail->appendNew(proc, DoubleToFloat, Origin(), finalAdd);
+    tail->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    CHECK(isIdentical(invoke(*code, 1, bitwise_cast(value)), value + value));
+    CHECK(isIdentical(invoke(*code, 0, bitwise_cast(value)), static_cast(constValue + value)));
+}
+
+void testDoubleArgToInt64BitwiseCast(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<int64_t>(proc, value), bitwise_cast<int64_t>(value)));
+}
+
+void testDoubleImmToInt64BitwiseCast(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun<int64_t>(proc), bitwise_cast<int64_t>(value)));
+}
+
+void testTwoBitwiseCastOnDouble(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* first = root->appendNew(proc, BitwiseCast, Origin(), argument);
+    Value* second = root->appendNew(proc, BitwiseCast, Origin(), first);
+    root->appendNewControlValue(proc, Return, Origin(), second);
+
+    CHECK(isIdentical(compileAndRun(proc, value), value));
+}
+
+void testBitwiseCastOnDoubleInMemory(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testBitwiseCastOnDoubleInMemoryIndexed(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* scaledOffset = root->appendNew(proc, Shl, Origin(),
+        offset,
+        root->appendNew(proc, Origin(), 3));
+    Value* address = root->appendNew(proc, Add, Origin(), base, scaledOffset);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value, 0), bitwise_cast(value)));
+}
+
+void testInt64BArgToDoubleBitwiseCast(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc, value), bitwise_cast(value)));
+}
+
+void testInt64BImmToDoubleBitwiseCast(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testTwoBitwiseCastOnInt64(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* first = root->appendNew(proc, BitwiseCast, Origin(), argument);
+    Value* second = root->appendNew(proc, BitwiseCast, Origin(), first);
+    root->appendNewControlValue(proc, Return, Origin(), second);
+
+    CHECK(isIdentical(compileAndRun(proc, value), value));
+}
+
+void testBitwiseCastOnInt64InMemory(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testBitwiseCastOnInt64InMemoryIndexed(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* scaledOffset = root->appendNew(proc, Shl, Origin(),
+        offset,
+        root->appendNew(proc, Origin(), 3));
+    Value* address = root->appendNew(proc, Add, Origin(), base, scaledOffset);
+    MemoryValue* loadDouble = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadDouble);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value, 0), bitwise_cast(value)));
+}
+
+void testFloatImmToInt32BitwiseCast(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testBitwiseCastOnFloatInMemory(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadFloat);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testInt32BArgToFloatBitwiseCast(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc, value), bitwise_cast(value)));
+}
+
+void testInt32BImmToFloatBitwiseCast(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitwiseCast, Origin(), argument));
+
+    CHECK(isIdentical(compileAndRun(proc), bitwise_cast(value)));
+}
+
+void testTwoBitwiseCastOnInt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* first = root->appendNew(proc, BitwiseCast, Origin(), argument);
+    Value* second = root->appendNew(proc, BitwiseCast, Origin(), first);
+    root->appendNewControlValue(proc, Return, Origin(), second);
+
+    CHECK(isIdentical(compileAndRun(proc, value), value));
+}
+
+void testBitwiseCastOnInt32InMemory(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadFloat = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* cast = root->appendNew(proc, BitwiseCast, Origin(), loadFloat);
+    root->appendNewControlValue(proc, Return, Origin(), cast);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), bitwise_cast(value)));
+}
+
+void testConvertDoubleToFloatArg(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, value), static_cast<float>(value)));
+}
+
+void testConvertDoubleToFloatImm(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew<ConstDoubleValue>(proc, Origin(), value);
+    Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+    CHECK(isIdentical(compileAndRun<float>(proc), static_cast<float>(value)));
+}
+
+void testConvertDoubleToFloatMem(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedDouble = root->appendNew<MemoryValue>(proc, Load, Double, Origin(), address);
+    Value* asFloat = root->appendNew<Value>(proc, DoubleToFloat, Origin(), loadedDouble);
+    root->appendNewControlValue(proc, Return, Origin(), asFloat);
+
+    CHECK(isIdentical(compileAndRun<float>(proc, &value), static_cast<float>(value)));
+}
+
+void testConvertFloatToDoubleArg(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* floatValue = root->appendNew(proc, BitwiseCast, Origin(), argument32);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), floatValue);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(value)), static_cast(value)));
+}
+
+void testConvertFloatToDoubleImm(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), value);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), argument);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testConvertFloatToDoubleMem(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedFloat = root->appendNew(proc, Load, Float, Origin(), address);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), loadedFloat);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), static_cast(value)));
+}
+
+void testConvertDoubleToFloatToDoubleToFloat(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), asFloat);
+    Value* asFloatAgain = root->appendNew(proc, DoubleToFloat, Origin(), asDouble);
+    root->appendNewControlValue(proc, Return, Origin(), asFloatAgain);
+
+    CHECK(isIdentical(compileAndRun(proc, value), static_cast(value)));
+}
+
+void testLoadFloatConvertDoubleConvertFloatStoreFloat(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* dst = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    MemoryValue* loadedFloat = root->appendNew(proc, Load, Float, Origin(), src);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), loadedFloat);
+    Value* asFloatAgain = root->appendNew(proc, DoubleToFloat, Origin(), asDouble);
+    root->appendNew(proc, Store, Origin(), asFloatAgain, dst);
+
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    float input = value;
+    float output = 0.;
+    CHECK(!compileAndRun(proc, &input, &output));
+    CHECK(isIdentical(input, output));
+}
+
+void testFroundArg(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), asFloat);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, value), static_cast(static_cast(value))));
+}
+
+void testFroundMem(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedDouble = root->appendNew(proc, Load, Double, Origin(), address);
+    Value* asFloat = root->appendNew(proc, DoubleToFloat, Origin(), loadedDouble);
+    Value* asDouble = root->appendNew(proc, FloatToDouble, Origin(), asFloat);
+    root->appendNewControlValue(proc, Return, Origin(), asDouble);
+
+    CHECK(isIdentical(compileAndRun(proc, &value), static_cast(static_cast(value))));
+}
+
+void testIToD64Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    for (auto testValue : int64Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToF64Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    for (auto testValue : int64Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToD32Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    for (auto testValue : int32Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToF32Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    for (auto testValue : int32Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToD64Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    int64_t inMemoryValue;
+    for (auto testValue : int64Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToF64Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int64, Origin(), address);
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    int64_t inMemoryValue;
+    for (auto testValue : int64Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToD32Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsDouble);
+
+    auto code = compile(proc);
+    int32_t inMemoryValue;
+    for (auto testValue : int32Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToF32Mem()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* loadedSrc = root->appendNew(proc, Load, Int32, Origin(), address);
+    Value* srcAsFloat = root->appendNew(proc, IToF, Origin(), loadedSrc);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloat);
+
+    auto code = compile(proc);
+    int32_t inMemoryValue;
+    for (auto testValue : int32Operands()) {
+        inMemoryValue = testValue.value;
+        CHECK(isIdentical(invoke(*code, &inMemoryValue), static_cast(testValue.value)));
+        CHECK(inMemoryValue == testValue.value);
+    }
+}
+
+void testIToD64Imm(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToF64Imm(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToD32Imm(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToD, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToF32Imm(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), value);
+    Value* srcAsFloatingPoint = root->appendNew(proc, IToF, Origin(), src);
+    root->appendNewControlValue(proc, Return, Origin(), srcAsFloatingPoint);
+    CHECK(isIdentical(compileAndRun(proc), static_cast(value)));
+}
+
+void testIToDReducedToIToF64Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), srcAsDouble);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    for (auto testValue : int64Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testIToDReducedToIToF32Arg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* src = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* srcAsDouble = root->appendNew(proc, IToD, Origin(), src);
+    Value* floatResult = root->appendNew(proc, DoubleToFloat, Origin(), srcAsDouble);
+    root->appendNewControlValue(proc, Return, Origin(), floatResult);
+
+    auto code = compile(proc);
+    for (auto testValue : int32Operands())
+        CHECK(isIdentical(invoke(*code, testValue.value), static_cast(testValue.value)));
+}
+
+void testStore32(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 0xbaadbeef;
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, value));
+    CHECK(slot == value);
+}
+
+void testStoreConstant(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 0xbaadbeef;
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(proc, Origin(), value),
+        root->appendNew(proc, Origin(), &slot));
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == value);
+}
+
+void testStoreConstantPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    intptr_t slot;
+    if (is64Bit())
+        slot = (static_cast(0xbaadbeef) << 32) + static_cast(0xbaadbeef);
+    else
+        slot = 0xbaadbeef;
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(proc, Origin(), value),
+        root->appendNew(proc, Origin(), &slot));
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == value);
+}
+
+void testStore8Arg()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, 42, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, 42, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testStore8Imm()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store8, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int8_t storage = 0;
+        CHECK(compileAndRun(proc, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testStorePartial8BitRegisterOnX86()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    // We want to have this in ECX.
+    Value* returnValue = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    // We want this stuck in EDX.
+    Value* whereToStore = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+
+    // The patch point is there to help us force the hand of the compiler.
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+
+    // This forces the values above to be materialized and gives the allocator
+    // a stronger incentive to name those registers the way we need.
+    patchpoint->append(ConstrainedValue(returnValue, ValueRep(GPRInfo::regT3)));
+    patchpoint->append(ConstrainedValue(whereToStore, ValueRep(GPRInfo::regT2)));
+
+    // We'll produce EDI.
+    patchpoint->resultConstraint = ValueRep::reg(GPRInfo::regT6);
+
+    // Give the allocator a good reason not to use any other register.
+    RegisterSet clobberSet = RegisterSet::allGPRs();
+    clobberSet.exclude(RegisterSet::stackRegisters());
+    clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberSet.clear(GPRInfo::regT3);
+    clobberSet.clear(GPRInfo::regT2);
+    clobberSet.clear(GPRInfo::regT6);
+    patchpoint->clobberLate(clobberSet);
+
+    // Set EDI.
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.xor64(params[0].gpr(), params[0].gpr());
+        });
+
+    // If everything goes well, we should have the big number in EAX,
+    // the patchpoint in EDI and whereToStore in EDX.
+    // Since EDI == 5, and AH = 5 on 8 bit store, this would go wrong
+    // if we use X86 partial registers.
+    root->appendNew(proc, Store8, Origin(), patchpoint, whereToStore);
+
+    root->appendNewControlValue(proc, Return, Origin(), returnValue);
+
+    int8_t storage = 0xff;
+    CHECK(compileAndRun(proc, 0x12345678abcdef12, &storage) == 0x12345678abcdef12);
+    CHECK(!storage);
+}
+
+void testStore16Arg()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, 42, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, 42, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testStore16Imm()
+{
+    { // Direct addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* address = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, &storage) == 42);
+        CHECK(storage == 42);
+    }
+
+    { // Indexed addressing.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* value = root->appendNew(proc, Origin(), 42);
+        Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* offset = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* displacement = root->appendNew(proc, Origin(), -1);
+
+        Value* baseDisplacement = root->appendNew(proc, Add, Origin(), displacement, base);
+        Value* address = root->appendNew(proc, Add, Origin(), baseDisplacement, offset);
+
+        root->appendNew(proc, Store16, Origin(), value, address);
+        root->appendNewControlValue(proc, Return, Origin(), value);
+
+        int16_t storage = -1;
+        CHECK(compileAndRun(proc, &storage, 1) == 42);
+        CHECK(storage == 42);
+    }
+}
+
+void testTrunc(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun(proc, value) == static_cast(value));
+}
+
+void testAdd1(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew<Const32Value>(proc, Origin(), 1)));
+
+    CHECK(compileAndRun<int>(proc, value) == value + 1);
+}
+
+void testAdd1Ptr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), 1)));
+
+    CHECK(compileAndRun(proc, value) == value + 1);
+}
+
+void testNeg32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun(proc, value) == -value);
+}
+
+void testNegPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), 0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)));
+
+    CHECK(compileAndRun<intptr_t>(proc, value) == -value);
+}
+
+void testStoreAddLoad32(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm32(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew<Const32Value>(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad8(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm8(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad16(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm16(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad64(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 37000000000ll;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37000000000ll + amount);
+}
+
+void testStoreAddLoadImm64(int64_t amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 370000000000ll;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 370000000000ll + amount);
+}
+
+void testStoreAddLoad32Index(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    int* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm32Index(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    int* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad8Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    int8_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm8Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int8_t slot = 37;
+    int8_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad16Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    int16_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoadImm16Index(int amount, B3::Opcode loadOpcode)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int16_t slot = 37;
+    int16_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, loadOpcode, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddLoad64Index(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 37000000000ll;
+    int64_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc, amount));
+    CHECK(slot == 37000000000ll + amount);
+}
+
+void testStoreAddLoadImm64Index(int64_t amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int64_t slot = 370000000000ll;
+    int64_t* ptr = &slot;
+    intptr_t zero = 0;
+    Value* slotPtr = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &ptr)),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), &zero)));
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int64, Origin(), slotPtr),
+            root->appendNew(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == 370000000000ll + amount);
+}
+
+void testStoreSubLoad(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int32_t startValue = std::numeric_limits<int32_t>::min();
+    int32_t slot = startValue;
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Sub, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, amount));
+    CHECK(slot == startValue - amount);
+}
+
+void testStoreAddLoadInterference(int amount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
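+    // The run-time argument below is &slot, so otherSlotPtr aliases slotPtr: the load has to
+    // happen before the store of 666 for the final value to come out as 37 + amount.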
+    ConstPtrValue* slotPtr = root->appendNew<ConstPtrValue>(proc, Origin(), &slot);
+    ArgumentRegValue* otherSlotPtr =
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    MemoryValue* load = root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), slotPtr);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 666),
+        otherSlotPtr);
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            load, root->appendNew<Const32Value>(proc, Origin(), amount)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Const32Value>(proc, Origin(), 0));
+
+    CHECK(!compileAndRun<int>(proc, &slot));
+    CHECK(slot == 37 + amount);
+}
+
+void testStoreAddAndLoad(int amount, int mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slot = 37;
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(
+                proc, Add, Origin(),
+                root->appendNew(proc, Load, Int32, Origin(), slotPtr),
+                root->appendNew(proc, Origin(), amount)),
+            root->appendNew(proc, Origin(), mask)),
+        slotPtr);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == ((37 + amount) & mask));
+}
+
+void testStoreNegLoad32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    int32_t slot = value;
+
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(proc, Load, Int32, Origin(), slotPtr)),
+        slotPtr);
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == -value);
+}
+
+void testStoreNegLoadPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    intptr_t slot = value;
+
+    ConstPtrValue* slotPtr = root->appendNew(proc, Origin(), &slot);
+    
+    root->appendNew(
+        proc, Store, Origin(),
+        root->appendNew(
+            proc, Sub, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(proc, Load, pointerType(), Origin(), slotPtr)),
+        slotPtr);
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+    CHECK(slot == -value);
+}
+
+void testAdd1Uncommuted(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), 1),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int>(proc, value) == value + 1);
+}
+
+void testLoadOffset()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    ConstPtrValue* arrayPtr = root->appendNew<ConstPtrValue>(proc, Origin(), array);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Add, Origin(),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, 0),
+            root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), arrayPtr, sizeof(int))));
+
+    CHECK(compileAndRun<int>(proc) == array[0] + array[1]);
+}
+
+void testLoadOffsetNotConstant()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    Value* arrayPtr = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Load, Int32, Origin(), arrayPtr, 0),
+            root->appendNew(proc, Load, Int32, Origin(), arrayPtr, sizeof(int))));
+
+    CHECK(compileAndRun(proc, &array[0]) == array[0] + array[1]);
+}
+
+void testLoadOffsetUsingAdd()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    ConstPtrValue* arrayPtr = root->appendNew(proc, Origin(), array);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Load, Int32, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew(proc, Origin(), 0))),
+            root->appendNew(
+                proc, Load, Int32, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew(proc, Origin(), sizeof(int))))));
+    
+    CHECK(compileAndRun(proc) == array[0] + array[1]);
+}
+
+void testLoadOffsetUsingAddInterference()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    ConstPtrValue* arrayPtr = root->appendNew(proc, Origin(), array);
+    ArgumentRegValue* otherArrayPtr =
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Const32Value* theNumberOfTheBeast = root->appendNew(proc, Origin(), 666);
+    MemoryValue* left = root->appendNew(
+        proc, Load, Int32, Origin(),
+        root->appendNew(
+            proc, Add, Origin(), arrayPtr,
+            root->appendNew(proc, Origin(), 0)));
+    MemoryValue* right = root->appendNew(
+        proc, Load, Int32, Origin(),
+        root->appendNew(
+            proc, Add, Origin(), arrayPtr,
+            root->appendNew(proc, Origin(), sizeof(int))));
+    root->appendNew(
+        proc, Store, Origin(), theNumberOfTheBeast, otherArrayPtr, 0);
+    root->appendNew(
+        proc, Store, Origin(), theNumberOfTheBeast, otherArrayPtr, sizeof(int));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(), left, right));
+    
+    CHECK(compileAndRun(proc, &array[0]) == 1 + 2);
+    CHECK(array[0] == 666);
+    CHECK(array[1] == 666);
+}
+
+void testLoadOffsetUsingAddNotConstant()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int array[] = { 1, 2 };
+    Value* arrayPtr = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Load, Int32, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew(proc, Origin(), 0))),
+            root->appendNew(
+                proc, Load, Int32, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(), arrayPtr,
+                    root->appendNew(proc, Origin(), sizeof(int))))));
+    
+    CHECK(compileAndRun(proc, &array[0]) == array[0] + array[1]);
+}
+
+void testLoadAddrShift(unsigned shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int slots[2];
+
+    // Figure out which slot to use while having proper alignment for the shift.
+    int* slot;
+    uintptr_t arg;
+    for (unsigned i = sizeof(slots)/sizeof(slots[0]); i--;) {
+        slot = slots + i;
+        arg = bitwise_cast<uintptr_t>(slot) >> shift;
+        if (bitwise_cast<int*>(arg << shift) == slot)
+            break;
+    }
+
+    *slot = 8675309;
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, Int32, Origin(),
+            root->appendNew<Value>(
+                proc, Shl, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<Const32Value>(proc, Origin(), shift))));
+
+    CHECK(compileAndRun<int32_t>(proc, arg) == 8675309);
+}
+
+void testFramePointer()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, FramePointer, Origin()));
+
+    void* fp = compileAndRun<void*>(proc);
+    CHECK(fp < &proc);
+    CHECK(fp >= bitwise_cast<char*>(&proc) - 10000);
+}
+
+void testOverrideFramePointer()
+{
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        // Add a stack slot to make the frame non trivial.
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(8));
+
+        // Sub on x86 UseDef the source. If FP is not protected correctly, it will be overridden since it is the last visible use.
+        Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* fp = root->appendNew<Value>(proc, FramePointer, Origin());
+        Value* result = root->appendNew<Value>(proc, Sub, Origin(), fp, offset);
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        CHECK(compileAndRun<int64_t>(proc, 1));
+    }
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
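+        // As above, but FP is used as a plain operand of BitAnd/Add so it stays live across other arithmetic.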
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(8));
+
+        Value* offset = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* fp = root->appendNew<Value>(proc, FramePointer, Origin());
+        Value* offsetFP = root->appendNew<Value>(proc, BitAnd, Origin(), offset, fp);
+        Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* offsetArg = root->appendNew<Value>(proc, Add, Origin(), offset, arg);
+        Value* result = root->appendNew<Value>(proc, Add, Origin(), offsetArg, offsetFP);
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        CHECK(compileAndRun<int64_t>(proc, 1, 2));
+    }
+}
+
+void testStackSlot()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(1)));
+
+    void* stackSlot = compileAndRun<void*>(proc);
+    CHECK(stackSlot < &proc);
+    CHECK(stackSlot >= bitwise_cast<char*>(&proc) - 10000);
+}
+
+void testLoadFromFramePointer()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew<Value>(proc, FramePointer, Origin())));
+
+    void* fp = compileAndRun<void*>(proc);
+    void* myFP = __builtin_frame_address(0);
+    CHECK(fp <= myFP);
+    CHECK(fp >= bitwise_cast<char*>(myFP) - 10000);
+}
+
+void testStoreLoadStackSlot(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    SlotBaseValue* stack =
+        root->appendNew<SlotBaseValue>(proc, Origin(), proc.addStackSlot(sizeof(int)));
+
+    root->appendNew<MemoryValue>(
+        proc, Store, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        stack);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<MemoryValue>(proc, Load, Int32, Origin(), stack));
+
+    CHECK(compileAndRun<int>(proc, value) == value);
+}
+
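+// Models the value a narrowing load of LoadedType should produce for |value|: reinterpret the low
+// bits, then sign- or zero-extend back to EffectiveType.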
+template<typename LoadedType, typename EffectiveType>
+EffectiveType modelLoad(EffectiveType value)
+{
+    union {
+        EffectiveType original;
+        LoadedType loaded;
+    } u;
+
+    u.original = value;
+    if (std::is_signed<LoadedType>::value)
+        return static_cast<EffectiveType>(u.loaded);
+    return static_cast<EffectiveType>(static_cast<typename std::make_unsigned<LoadedType>::type>(u.loaded));
+}
+
+template<>
+float modelLoad<float, float>(float value) { return value; }
+
+template<>
+double modelLoad<double, double>(double value) { return value; }
+
+template
+void testLoad(B3::Opcode opcode, InputType value)
+{
+    // Simple load from an absolute address.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, opcode, type, Origin(),
+                root->appendNew(proc, Origin(), &value)));
+
+        CHECK(isIdentical(compileAndRun(proc), modelLoad(value)));
+    }
+    
+    // Simple load from an address in a register.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, opcode, type, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+
+        CHECK(isIdentical(compileAndRun(proc, &value), modelLoad(value)));
+    }
+    
+    // Simple load from an address in a register, at an offset.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, opcode, type, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                sizeof(InputType)));
+
+        CHECK(isIdentical(compileAndRun(proc, &value - 1), modelLoad(value)));
+    }
+
+    // Load from a simple base-index with various scales.
+    for (unsigned logScale = 0; logScale <= 3; ++logScale) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, opcode, type, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                    root->appendNew(
+                        proc, Shl, Origin(),
+                        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                        root->appendNew(proc, Origin(), logScale)))));
+
+        CHECK(isIdentical(compileAndRun(proc, &value - 2, (sizeof(InputType) * 2) >> logScale), modelLoad(value)));
+    }
+
+    // Load from a simple base-index with various scales, but commuted.
+    for (unsigned logScale = 0; logScale <= 3; ++logScale) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, opcode, type, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(),
+                    root->appendNew(
+                        proc, Shl, Origin(),
+                        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                        root->appendNew(proc, Origin(), logScale)),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+
+        CHECK(isIdentical(compileAndRun(proc, &value - 2, (sizeof(InputType) * 2) >> logScale), modelLoad(value)));
+    }
+}
+
+template
+void testLoad(B3::Opcode opcode, int32_t value)
+{
+    return testLoad(opcode, value);
+}
+
+template
+void testLoad(T value)
+{
+    return testLoad(Load, value);
+}
+
+void testStoreFloat(double input)
+{
+    // Simple store from an address in a register.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* argumentAsFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+
+        Value* destinationAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        root->appendNew(proc, Store, Origin(), argumentAsFloat, destinationAddress);
+
+        root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+        float output = 0.;
+        CHECK(!compileAndRun(proc, input, &output));
+        CHECK(isIdentical(static_cast(input), output));
+    }
+
+    // Simple indexed store.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* argument = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* argumentAsFloat = root->appendNew(proc, DoubleToFloat, Origin(), argument);
+
+        Value* destinationBaseAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* index = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* scaledIndex = root->appendNew(
+            proc, Shl, Origin(),
+            index,
+            root->appendNew(proc, Origin(), 2));
+        Value* destinationAddress = root->appendNew(proc, Add, Origin(), scaledIndex, destinationBaseAddress);
+
+        root->appendNew(proc, Store, Origin(), argumentAsFloat, destinationAddress);
+
+        root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+        float output = 0.;
+        CHECK(!compileAndRun(proc, input, &output - 1, 1));
+        CHECK(isIdentical(static_cast(input), output));
+    }
+}
+
+void testStoreDoubleConstantAsFloat(double input)
+{
+    // Simple store from an address in a register.
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* value = root->appendNew(proc, Origin(), input);
+    Value* valueAsFloat = root->appendNew(proc, DoubleToFloat, Origin(), value);
+
+    Value* destinationAddress = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNew(proc, Store, Origin(), valueAsFloat, destinationAddress);
+
+    root->appendNewControlValue(proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    float output = 0.;
+    CHECK(!compileAndRun(proc, input, &output));
+    CHECK(isIdentical(static_cast(input), output));
+}
+
+void testSpillGP()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
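+    // Chain enough adds that more values are live at once than there are general-purpose
+    // registers, forcing the register allocator to spill.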
+    Vector<Value*> sources;
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+
+    for (unsigned i = 0; i < 30; ++i) {
+        sources.append(
+            root->appendNew<Value>(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+        );
+    }
+
+    Value* total = root->appendNew<Const64Value>(proc, Origin(), 0);
+    for (Value* value : sources)
+        total = root->appendNew<Value>(proc, Add, Origin(), total, value);
+
+    root->appendNewControlValue(proc, Return, Origin(), total);
+    compileAndRun<int>(proc, 1, 2);
+}
+
+void testSpillFP()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> sources;
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0));
+    sources.append(root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1));
+
+    for (unsigned i = 0; i < 30; ++i) {
+        sources.append(
+            root->appendNew<Value>(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+        );
+    }
+
+    Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+    for (Value* value : sources)
+        total = root->appendNew<Value>(proc, Add, Origin(), total, value);
+
+    root->appendNewControlValue(proc, Return, Origin(), total);
+    compileAndRun<double>(proc, 1.1, 2.5);
+}
+
+void testInt32ToDoublePartialRegisterStall()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loop = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
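+    // Sums 100000 + 99999 + ... + 1 as doubles. Each iteration converts the Int32 counter with
+    // IToD; on x86 this conversion can hit a partial register stall if the destination register's
+    // previous value is not broken as a dependency.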
+    // Head.
+    Value* total = root->appendNew<ConstDoubleValue>(proc, Origin(), 0.);
+    Value* counter = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    UpsilonValue* originalTotal = root->appendNew<UpsilonValue>(proc, Origin(), total);
+    UpsilonValue* originalCounter = root->appendNew<UpsilonValue>(proc, Origin(), counter);
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+    // Loop.
+    Value* loopCounter = loop->appendNew<Value>(proc, Phi, Int64, Origin());
+    Value* loopTotal = loop->appendNew<Value>(proc, Phi, Double, Origin());
+    originalCounter->setPhi(loopCounter);
+    originalTotal->setPhi(loopTotal);
+
+    Value* truncatedCounter = loop->appendNew<Value>(proc, Trunc, Origin(), loopCounter);
+    Value* doubleCounter = loop->appendNew<Value>(proc, IToD, Origin(), truncatedCounter);
+    Value* updatedTotal = loop->appendNew<Value>(proc, Add, Origin(), doubleCounter, loopTotal);
+    UpsilonValue* updatedTotalUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), updatedTotal);
+    updatedTotalUpsilon->setPhi(loopTotal);
+
+    Value* decCounter = loop->appendNew<Value>(proc, Sub, Origin(), loopCounter, loop->appendNew<Const64Value>(proc, Origin(), 1));
+    UpsilonValue* decCounterUpsilon = loop->appendNew<UpsilonValue>(proc, Origin(), decCounter);
+    decCounterUpsilon->setPhi(loopCounter);
+    loop->appendNewControlValue(
+        proc, Branch, Origin(),
+        decCounter,
+        FrequentedBlock(loop), FrequentedBlock(done));
+
+    // Tail.
+    done->appendNewControlValue(proc, Return, Origin(), updatedTotal);
+    CHECK(isIdentical(compileAndRun<double>(proc, 100000), 5000050000.));
+}
+
+void testInt32ToDoublePartialRegisterWithoutStall()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loop = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    // Head.
+    Value* total = root->appendNew(proc, Origin(), 0.);
+    Value* counter = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    UpsilonValue* originalTotal = root->appendNew(proc, Origin(), total);
+    UpsilonValue* originalCounter = root->appendNew(proc, Origin(), counter);
+    uint64_t forPaddingInput;
+    Value* forPaddingInputAddress = root->appendNew(proc, Origin(), &forPaddingInput);
+    uint64_t forPaddingOutput;
+    Value* forPaddingOutputAddress = root->appendNew(proc, Origin(), &forPaddingOutput);
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
+    // Loop.
+    Value* loopCounter = loop->appendNew(proc, Phi, Int64, Origin());
+    Value* loopTotal = loop->appendNew(proc, Phi, Double, Origin());
+    originalCounter->setPhi(loopCounter);
+    originalTotal->setPhi(loopTotal);
+
+    Value* truncatedCounter = loop->appendNew(proc, Trunc, Origin(), loopCounter);
+    Value* doubleCounter = loop->appendNew(proc, IToD, Origin(), truncatedCounter);
+    Value* updatedTotal = loop->appendNew(proc, Add, Origin(), doubleCounter, loopTotal);
+
+    // Add enough padding instructions to avoid a stall.
+    Value* loadPadding = loop->appendNew(proc, Load, Int64, Origin(), forPaddingInputAddress);
+    Value* padding = loop->appendNew(proc, BitXor, Origin(), loadPadding, loopCounter);
+    padding = loop->appendNew(proc, Add, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, BitOr, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, Sub, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, BitXor, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, Add, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, BitOr, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, Sub, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, BitXor, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, Add, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, BitOr, Origin(), padding, loopCounter);
+    padding = loop->appendNew(proc, Sub, Origin(), padding, loopCounter);
+    loop->appendNew(proc, Store, Origin(), padding, forPaddingOutputAddress);
+
+    UpsilonValue* updatedTotalUpsilon = loop->appendNew(proc, Origin(), updatedTotal);
+    updatedTotalUpsilon->setPhi(loopTotal);
+
+    Value* decCounter = loop->appendNew(proc, Sub, Origin(), loopCounter, loop->appendNew(proc, Origin(), 1));
+    UpsilonValue* decCounterUpsilon = loop->appendNew(proc, Origin(), decCounter);
+    decCounterUpsilon->setPhi(loopCounter);
+    loop->appendNewControlValue(
+        proc, Branch, Origin(),
+        decCounter,
+        FrequentedBlock(loop), FrequentedBlock(done));
+
+    // Tail.
+    done->appendNewControlValue(proc, Return, Origin(), updatedTotal);
+    CHECK(isIdentical(compileAndRun(proc, 100000), 5000050000.));
+}
+
+void testBranch()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew<Const32Value>(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke<int>(*code, 42) == 1);
+    CHECK(invoke<int>(*code, 0) == 0);
+}
+
+void testBranchPtr()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, static_cast(42)) == 1);
+    CHECK(invoke(*code, static_cast(0)) == 0);
+}
+
+void testDiamond()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenResult = thenCase->appendNew(
+        proc, Origin(), thenCase->appendNew(proc, Origin(), 1));
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    UpsilonValue* elseResult = elseCase->appendNew(
+        proc, Origin(), elseCase->appendNew(proc, Origin(), 0));
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    Value* phi = done->appendNew(proc, Phi, Int32, Origin());
+    thenResult->setPhi(phi);
+    elseResult->setPhi(phi);
+    done->appendNewControlValue(proc, Return, Origin(), phi);
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchNotEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, NotEqual, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchNotEqualCommute()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, NotEqual, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchNotEqualNotEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, NotEqual, Origin(),
+            root->appendNew(
+                proc, NotEqual, Origin(),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew(proc, Origin(), 0)),
+            root->appendNew(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Equal, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            root->appendNew(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 0));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 1));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchEqualEqual()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Equal, Origin(),
+            root->appendNew(
+                proc, Equal, Origin(),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew(proc, Origin(), 0)),
+            root->appendNew(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchEqualCommute()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Equal, Origin(),
+            root->appendNew(proc, Origin(), 0),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0))),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 0));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 1));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchEqualEqual1()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Equal, Origin(),
+            root->appendNew(
+                proc, Equal, Origin(),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew(proc, Origin(), 0)),
+            root->appendNew(proc, Origin(), 1)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 0));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 1));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42) == 1);
+    CHECK(invoke(*code, 0) == 0);
+}
+
+void testBranchEqualOrUnorderedArgs(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc, a, b) == expected);
+}
+
+void testBranchEqualOrUnorderedArgs(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentB = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc, &a, &b) == expected);
+}
+
+void testBranchNotEqualAndOrderedArgs(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* equalOrUnordered = root->appendNew(
+        proc, EqualOrUnordered, Origin(),
+        argumentA,
+        argumentB);
+    Value* notEqualAndOrdered = root->appendNew(
+        proc, Equal, Origin(),
+        root->appendNew(proc, Origin(), 0),
+        equalOrUnordered);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        notEqualAndOrdered,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (!std::isunordered(a, b) && a != b) ? 42 : -13;
+    CHECK(compileAndRun(proc, a, b) == expected);
+}
+
+void testBranchNotEqualAndOrderedArgs(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentB = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* equalOrUnordered = root->appendNew(
+        proc, EqualOrUnordered, Origin(),
+        argumentA,
+        argumentB);
+    Value* notEqualAndOrdered = root->appendNew(
+        proc, Equal, Origin(),
+        root->appendNew(proc, Origin(), 0),
+        equalOrUnordered);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        notEqualAndOrdered,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (!std::isunordered(a, b) && a != b) ? 42 : -13;
+    CHECK(compileAndRun(proc, &a, &b) == expected);
+}
+
+void testBranchEqualOrUnorderedDoubleArgImm(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc, a) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatArgImm(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc, &a) == expected);
+}
+
+void testBranchEqualOrUnorderedDoubleImms(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Origin(), a);
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc) == expected);
+}
+
+void testBranchEqualOrUnorderedFloatImms(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argumentA = root->appendNew(proc, Origin(), a);
+    Value* argumentB = root->appendNew(proc, Origin(), b);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argumentA,
+            argumentB),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc) == expected);
+}
+
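+// The FloatToDouble conversions below are deliberately redundant (hence "useless");
+// the test only verifies that the branch result stays correct, the intent presumably
+// being to exercise how the compiler handles such conversions feeding EqualOrUnordered.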
+void testBranchEqualOrUnorderedFloatWithUselessDoubleConversion(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* argument1 = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew(proc, Load, Float, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* argument1AsDouble = root->appendNew(proc, FloatToDouble, Origin(), argument1);
+    Value* argument2AsDouble = root->appendNew(proc, FloatToDouble, Origin(), argument2);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, EqualOrUnordered, Origin(),
+            argument1AsDouble,
+            argument2AsDouble),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 42));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), -13));
+
+    int64_t expected = (std::isunordered(a, b) || a == b) ? 42 : -13;
+    CHECK(compileAndRun(proc, &a, &b) == expected);
+}
+
+void testBranchFold(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Origin(), value),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    CHECK(compileAndRun(proc) == !!value);
+}
+
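+// Builds an if/else diamond whose arms feed a Phi via Upsilon values, with the branch
+// condition a compile-time constant, so the diamond is expected to fold down to
+// returning !!value.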
+void testDiamondFold(int value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* done = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Origin(), value),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    UpsilonValue* thenResult = thenCase->appendNew(
+        proc, Origin(), thenCase->appendNew(proc, Origin(), 1));
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    UpsilonValue* elseResult = elseCase->appendNew(
+        proc, Origin(), elseCase->appendNew(proc, Origin(), 0));
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(done));
+
+    Value* phi = done->appendNew(proc, Phi, Int32, Origin());
+    thenResult->setPhi(phi);
+    elseResult->setPhi(phi);
+    done->appendNewControlValue(proc, Return, Origin(), phi);
+
+    CHECK(compileAndRun(proc) == !!value);
+}
+
+void testBranchNotEqualFoldPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, NotEqual, Origin(),
+            root->appendNew(proc, Origin(), value),
+            root->appendNew(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    CHECK(compileAndRun(proc) == !!value);
+}
+
+void testBranchEqualFoldPtr(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Equal, Origin(),
+            root->appendNew(proc, Origin(), value),
+            root->appendNew(proc, Origin(), 0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    CHECK(compileAndRun(proc) == !value);
+}
+
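+// The next several tests branch directly on a value loaded from memory (pointer-sized,
+// 32-bit, and sign-/zero-extending 8- and 16-bit loads), taking the then-branch when
+// the loaded value is non-zero.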
+void testBranchLoadPtr()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    intptr_t cond;
+    cond = 42;
+    CHECK(invoke(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke(*code, &cond) == 0);
+}
+
+void testBranchLoad32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Load, Int32, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    int32_t cond;
+    cond = 42;
+    CHECK(invoke(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke(*code, &cond) == 0);
+}
+
+void testBranchLoad8S()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Load8S, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    int8_t cond;
+    cond = -1;
+    CHECK(invoke(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke(*code, &cond) == 0);
+}
+
+void testBranchLoad8Z()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Load8Z, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    uint8_t cond;
+    cond = 1;
+    CHECK(invoke(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke(*code, &cond) == 0);
+}
+
+void testBranchLoad16S()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Load16S, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    int16_t cond;
+    cond = -1;
+    CHECK(invoke(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke(*code, &cond) == 0);
+}
+
+void testBranchLoad16Z()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Load16Z, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    uint16_t cond;
+    cond = 1;
+    CHECK(invoke(*code, &cond) == 1);
+    cond = 0;
+    CHECK(invoke(*code, &cond) == 0);
+}
+
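+// Branches on an Above comparison of a byte loaded through a base + (index << logScale)
+// address, checking both an all-0xff word and an all-zero word.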
+void testBranch8WithLoad8ZIndex()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    int logScale = 1;
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Above, Origin(),
+            root->appendNew(
+                proc, Load8Z, Origin(),
+                root->appendNew(
+                    proc, Add, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                    root->appendNew(
+                        proc, Shl, Origin(),
+                        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                        root->appendNew(proc, Origin(), logScale)))),
+            root->appendNew(proc, Origin(), 250)),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(proc, Origin(), 1));
+
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    uint32_t cond;
+    cond = 0xffffffffU; // All bytes are 0xff.
+    CHECK(invoke(*code, &cond - 2, (sizeof(uint32_t) * 2) >> logScale) == 1);
+    cond = 0x00000000U; // All bytes are 0.
+    CHECK(invoke(*code, &cond - 2, (sizeof(uint32_t) * 2) >> logScale) == 0);
+}
+
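+// Compile-time stress test: emits numConstructs alternating control-flow diamonds and
+// summation loops over numVars int32 slots, then logs how long compilation took.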
+void testComplex(unsigned numVars, unsigned numConstructs)
+{
+    double before = monotonicallyIncreasingTimeMS();
+    
+    Procedure proc;
+    BasicBlock* current = proc.addBlock();
+
+    Const32Value* one = current->appendNew(proc, Origin(), 1);
+
+    Vector<int32_t> varSlots;
+    for (unsigned i = numVars; i--;)
+        varSlots.append(i);
+
+    Vector<Value*> vars;
+    for (int32_t& varSlot : varSlots) {
+        Value* varSlotPtr = current->appendNew(proc, Origin(), &varSlot);
+        vars.append(current->appendNew(proc, Load, Int32, Origin(), varSlotPtr));
+    }
+
+    for (unsigned i = 0; i < numConstructs; ++i) {
+        if (i & 1) {
+            // Control flow diamond.
+            unsigned predicateVarIndex = ((i >> 1) + 2) % numVars;
+            unsigned thenIncVarIndex = ((i >> 1) + 0) % numVars;
+            unsigned elseIncVarIndex = ((i >> 1) + 1) % numVars;
+
+            BasicBlock* thenBlock = proc.addBlock();
+            BasicBlock* elseBlock = proc.addBlock();
+            BasicBlock* continuation = proc.addBlock();
+
+            current->appendNewControlValue(
+                proc, Branch, Origin(), vars[predicateVarIndex],
+                FrequentedBlock(thenBlock), FrequentedBlock(elseBlock));
+
+            UpsilonValue* thenThenResult = thenBlock->appendNew(
+                proc, Origin(),
+                thenBlock->appendNew(proc, Add, Origin(), vars[thenIncVarIndex], one));
+            UpsilonValue* thenElseResult = thenBlock->appendNew(
+                proc, Origin(), vars[elseIncVarIndex]);
+            thenBlock->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            UpsilonValue* elseElseResult = elseBlock->appendNew(
+                proc, Origin(),
+                elseBlock->appendNew(proc, Add, Origin(), vars[elseIncVarIndex], one));
+            UpsilonValue* elseThenResult = elseBlock->appendNew(
+                proc, Origin(), vars[thenIncVarIndex]);
+            elseBlock->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            Value* thenPhi = continuation->appendNew(proc, Phi, Int32, Origin());
+            thenThenResult->setPhi(thenPhi);
+            elseThenResult->setPhi(thenPhi);
+            vars[thenIncVarIndex] = thenPhi;
+            
+            Value* elsePhi = continuation->appendNew(proc, Phi, Int32, Origin());
+            thenElseResult->setPhi(elsePhi);
+            elseElseResult->setPhi(elsePhi);
+            vars[elseIncVarIndex] = elsePhi;
+            
+            current = continuation;
+        } else {
+            // Loop.
+
+            BasicBlock* loopEntry = proc.addBlock();
+            BasicBlock* loopReentry = proc.addBlock();
+            BasicBlock* loopBody = proc.addBlock();
+            BasicBlock* loopExit = proc.addBlock();
+            BasicBlock* loopSkip = proc.addBlock();
+            BasicBlock* continuation = proc.addBlock();
+            
+            Value* startIndex = vars[((i >> 1) + 1) % numVars];
+            Value* startSum = current->appendNew(proc, Origin(), 0);
+            current->appendNewControlValue(
+                proc, Branch, Origin(), startIndex,
+                FrequentedBlock(loopEntry), FrequentedBlock(loopSkip));
+
+            UpsilonValue* startIndexForBody = loopEntry->appendNew(
+                proc, Origin(), startIndex);
+            UpsilonValue* startSumForBody = loopEntry->appendNew(
+                proc, Origin(), startSum);
+            loopEntry->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loopBody));
+
+            Value* bodyIndex = loopBody->appendNew(proc, Phi, Int32, Origin());
+            startIndexForBody->setPhi(bodyIndex);
+            Value* bodySum = loopBody->appendNew(proc, Phi, Int32, Origin());
+            startSumForBody->setPhi(bodySum);
+            Value* newBodyIndex = loopBody->appendNew(proc, Sub, Origin(), bodyIndex, one);
+            Value* newBodySum = loopBody->appendNew(
+                proc, Add, Origin(),
+                bodySum,
+                loopBody->appendNew(
+                    proc, Load, Int32, Origin(),
+                    loopBody->appendNew(
+                        proc, Add, Origin(),
+                        loopBody->appendNew(proc, Origin(), varSlots.data()),
+                        loopBody->appendNew(
+                            proc, Shl, Origin(),
+                            loopBody->appendNew(
+                                proc, ZExt32, Origin(),
+                                loopBody->appendNew(
+                                    proc, BitAnd, Origin(),
+                                    newBodyIndex,
+                                    loopBody->appendNew(
+                                        proc, Origin(), numVars - 1))),
+                            loopBody->appendNew(proc, Origin(), 2)))));
+            loopBody->appendNewControlValue(
+                proc, Branch, Origin(), newBodyIndex,
+                FrequentedBlock(loopReentry), FrequentedBlock(loopExit));
+
+            loopReentry->appendNew(proc, Origin(), newBodyIndex, bodyIndex);
+            loopReentry->appendNew(proc, Origin(), newBodySum, bodySum);
+            loopReentry->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loopBody));
+
+            UpsilonValue* exitSum = loopExit->appendNew(proc, Origin(), newBodySum);
+            loopExit->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            UpsilonValue* skipSum = loopSkip->appendNew(proc, Origin(), startSum);
+            loopSkip->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(continuation));
+
+            Value* finalSum = continuation->appendNew(proc, Phi, Int32, Origin());
+            exitSum->setPhi(finalSum);
+            skipSum->setPhi(finalSum);
+
+            current = continuation;
+            vars[((i >> 1) + 0) % numVars] = finalSum;
+        }
+    }
+
+    current->appendNewControlValue(proc, Return, Origin(), vars[0]);
+
+    compile(proc);
+
+    double after = monotonicallyIncreasingTimeMS();
+    dataLog(toCString("    That took ", after - before, " ms.\n"));
+}
+
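+// Basic patchpoint: both inputs are constrained to SomeRegister and the generator
+// emits an add32 straight into the result register.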
+void testSimplePatchpoint()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithoutOuputClobbersGPArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* const1 = root->appendNew(proc, Origin(), 42);
+    Value* const2 = root->appendNew(proc, Origin(), 13);
+
+    PatchpointValue* patchpoint = root->appendNew(proc, Void, Origin());
+    patchpoint->clobberLate(RegisterSet(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1));
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params[0].gpr());
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params[1].gpr());
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR0);
+            jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR1);
+        });
+
+    Value* result = root->appendNew(proc, Add, Origin(), arg1, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithOuputClobbersGPArgs()
+{
+    // We can't predict where the output will land, but we want to be sure it is not
+    // one of the clobbered registers, which is hard to test directly.
+    //
+    // Instead we force the register allocator's hand by clobbering absolutely
+    // everything but one register. The only valid allocation is then to give that
+    // register to the result and spill everything else.
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* const1 = root->appendNew(proc, Origin(), 42);
+    Value* const2 = root->appendNew(proc, Origin(), 13);
+
+    PatchpointValue* patchpoint = root->appendNew(proc, Int64, Origin());
+
+    RegisterSet clobberAll = RegisterSet::allGPRs();
+    clobberAll.exclude(RegisterSet::stackRegisters());
+    clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberAll.clear(GPRInfo::argumentGPR2);
+    patchpoint->clobberLate(clobberAll);
+
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            jit.move(params[1].gpr(), params[0].gpr());
+            jit.add64(params[2].gpr(), params[0].gpr());
+
+            clobberAll.forEach([&] (Reg reg) {
+                jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), reg.gpr());
+            });
+        });
+
+    Value* result = root->appendNew(proc, Add, Origin(), patchpoint,
+        root->appendNew(proc, Add, Origin(), arg1, arg2));
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, 1, 2) == 58);
+}
+
+void testSimplePatchpointWithoutOuputClobbersFPArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* const1 = root->appendNew(proc, Origin(), 42.5);
+    Value* const2 = root->appendNew(proc, Origin(), 13.1);
+
+    PatchpointValue* patchpoint = root->appendNew(proc, Void, Origin());
+    patchpoint->clobberLate(RegisterSet(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1));
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isFPR());
+            CHECK(params[1].isFPR());
+            jit.moveZeroToDouble(params[0].fpr());
+            jit.moveZeroToDouble(params[1].fpr());
+            jit.moveZeroToDouble(FPRInfo::argumentFPR0);
+            jit.moveZeroToDouble(FPRInfo::argumentFPR1);
+        });
+
+    Value* result = root->appendNew(proc, Add, Origin(), arg1, arg2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, 1.5, 2.5) == 4);
+}
+
+void testSimplePatchpointWithOuputClobbersFPArgs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+    Value* const1 = root->appendNew(proc, Origin(), 42.5);
+    Value* const2 = root->appendNew(proc, Origin(), 13.1);
+
+    PatchpointValue* patchpoint = root->appendNew(proc, Double, Origin());
+
+    RegisterSet clobberAll = RegisterSet::allFPRs();
+    clobberAll.exclude(RegisterSet::stackRegisters());
+    clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberAll.clear(FPRInfo::argumentFPR2);
+    patchpoint->clobberLate(clobberAll);
+
+    patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isFPR());
+            CHECK(params[1].isFPR());
+            CHECK(params[2].isFPR());
+            jit.addDouble(params[1].fpr(), params[2].fpr(), params[0].fpr());
+
+            clobberAll.forEach([&] (Reg reg) {
+                jit.moveZeroToDouble(reg.fpr());
+            });
+        });
+
+    Value* result = root->appendNew(proc, Add, Origin(), patchpoint,
+        root->appendNew(proc, Add, Origin(), arg1, arg2));
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun(proc, 1.5, 2.5) == 59.6);
+}
+
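+// Early-clobbering an argument register should force the corresponding input out of
+// that register before the patchpoint executes; the checks below record which inputs
+// are still expected to arrive in their original argument registers.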
+void testPatchpointWithEarlyClobber()
+{
+    auto test = [] (GPRReg registerToClobber, bool arg1InArgGPR, bool arg2InArgGPR) {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        
+        PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+        patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+        patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        patchpoint->clobberEarly(RegisterSet(registerToClobber));
+        patchpoint->setGenerator(
+            [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                CHECK((params[1].gpr() == GPRInfo::argumentGPR0) == arg1InArgGPR);
+                CHECK((params[2].gpr() == GPRInfo::argumentGPR1) == arg2InArgGPR);
+                
+                add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+            });
+
+        root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+        CHECK(compileAndRun(proc, 1, 2) == 3);
+    };
+
+    test(GPRInfo::nonArgGPR0, true, true);
+    test(GPRInfo::argumentGPR0, false, true);
+    test(GPRInfo::argumentGPR1, true, false);
+}
+
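+// Constrains both patchpoint inputs to stack-argument slots and has the generator read
+// them back off the frame via their offsetFromFP().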
+void testPatchpointCallArg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::stackArgument(0)));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::stackArgument(8)));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isStack());
+            CHECK(params[2].isStack());
+            jit.load32(
+                CCallHelpers::Address(GPRInfo::callFrameRegister, params[1].offsetFromFP()),
+                params[0].gpr());
+            jit.add32(
+                CCallHelpers::Address(GPRInfo::callFrameRegister, params[2].offsetFromFP()),
+                params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testPatchpointFixedRegister()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep(GPRInfo::regT0)));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep(GPRInfo::regT1)));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1] == ValueRep(GPRInfo::regT0));
+            CHECK(params[2] == ValueRep(GPRInfo::regT1));
+            add32(jit, GPRInfo::regT0, GPRInfo::regT1, params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testPatchpointAny(ValueRep rep)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, rep));
+    patchpoint->append(ConstrainedValue(arg2, rep));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // We shouldn't have spilled the inputs, so we assert that they're in registers.
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testPatchpointGPScratch()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(arg1, ValueRep::SomeRegister);
+    patchpoint->append(arg2, ValueRep::SomeRegister);
+    patchpoint->numGPScratchRegisters = 2;
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // We shouldn't have spilled the inputs, so we assert that they're in registers.
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            CHECK(params.gpScratch(0) != InvalidGPRReg);
+            CHECK(params.gpScratch(0) != params[0].gpr());
+            CHECK(params.gpScratch(0) != params[1].gpr());
+            CHECK(params.gpScratch(0) != params[2].gpr());
+            CHECK(params.gpScratch(1) != InvalidGPRReg);
+            CHECK(params.gpScratch(1) != params.gpScratch(0));
+            CHECK(params.gpScratch(1) != params[0].gpr());
+            CHECK(params.gpScratch(1) != params[1].gpr());
+            CHECK(params.gpScratch(1) != params[2].gpr());
+            CHECK(!params.unavailableRegisters().get(params.gpScratch(0)));
+            CHECK(!params.unavailableRegisters().get(params.gpScratch(1)));
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testPatchpointFPScratch()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(arg1, ValueRep::SomeRegister);
+    patchpoint->append(arg2, ValueRep::SomeRegister);
+    patchpoint->numFPScratchRegisters = 2;
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // We shouldn't have spilled the inputs, so we assert that they're in registers.
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            CHECK(params.fpScratch(0) != InvalidFPRReg);
+            CHECK(params.fpScratch(1) != InvalidFPRReg);
+            CHECK(params.fpScratch(1) != params.fpScratch(0));
+            CHECK(!params.unavailableRegisters().get(params.fpScratch(0)));
+            CHECK(!params.unavailableRegisters().get(params.fpScratch(1)));
+            add32(jit, params[1].gpr(), params[2].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
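+// Feeds 200 loaded values into one patchpoint with LateColdAny constraints; the
+// generator sums whatever representations it is handed, and the result should be
+// 0 + 1 + ... + 199.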
+void testPatchpointLotsOfLateAnys()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Vector<int> things;
+    for (unsigned i = 200; i--;)
+        things.append(i);
+
+    Vector<Value*> values;
+    for (int& thing : things) {
+        Value* value = root->appendNew(
+            proc, Load, Int32, Origin(),
+            root->appendNew(proc, Origin(), &thing));
+        values.append(value);
+    }
+
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    for (Value* value : values)
+        patchpoint->append(ConstrainedValue(value, ValueRep::LateColdAny));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            // The inputs use LateColdAny constraints, so each one may arrive either
+            // in a register or spilled to the stack; handle both below.
+            CHECK(params.size() == things.size() + 1);
+            CHECK(params[0].isGPR());
+            jit.move(CCallHelpers::TrustedImm32(0), params[0].gpr());
+            for (unsigned i = 1; i < params.size(); ++i) {
+                if (params[i].isGPR()) {
+                    CHECK(params[i] != params[0]);
+                    jit.add32(params[i].gpr(), params[0].gpr());
+                } else {
+                    CHECK(params[i].isStack());
+                    jit.add32(CCallHelpers::Address(GPRInfo::callFrameRegister, params[i].offsetFromFP()), params[0].gpr());
+                }
+            }
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc) == (things.size() * (things.size() - 1)) / 2);
+}
+
+void testPatchpointAnyImm(ValueRep rep)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), 42);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, rep));
+    patchpoint->append(ConstrainedValue(arg2, rep));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isConstant());
+            CHECK(params[2].value() == 42);
+            jit.add32(
+                CCallHelpers::TrustedImm32(static_cast(params[2].value())),
+                params[1].gpr(), params[0].gpr());
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1) == 43);
+}
+
+void testPatchpointManyImms()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), 42);
+    Value* arg2 = root->appendNew(proc, Origin(), 43);
+    Value* arg3 = root->appendNew(proc, Origin(), 43000000000000ll);
+    Value* arg4 = root->appendNew(proc, Origin(), 42.5);
+    PatchpointValue* patchpoint = root->appendNew(proc, Void, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::WarmAny));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::WarmAny));
+    patchpoint->append(ConstrainedValue(arg3, ValueRep::WarmAny));
+    patchpoint->append(ConstrainedValue(arg4, ValueRep::WarmAny));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+            CHECK(params.size() == 4);
+            CHECK(params[0] == ValueRep::constant(42));
+            CHECK(params[1] == ValueRep::constant(43));
+            CHECK(params[2] == ValueRep::constant(43000000000000ll));
+            CHECK(params[3] == ValueRep::constant(bitwise_cast(42.5)));
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    CHECK(!compileAndRun(proc));
+}
+
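+// The next three tests pin the patchpoint's result to, respectively, a specific
+// register, a stack-argument slot, and WarmAny (which in that last case ends up on
+// the stack).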
+void testPatchpointWithRegisterResult()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->resultConstraint = ValueRep::reg(GPRInfo::nonArgGPR0);
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0] == ValueRep::reg(GPRInfo::nonArgGPR0));
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), GPRInfo::nonArgGPR0);
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testPatchpointWithStackArgumentResult()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->resultConstraint = ValueRep::stackArgument(0);
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0] == ValueRep::stack(-static_cast(proc.frameSize())));
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            jit.add32(params[1].gpr(), params[2].gpr(), jit.scratchRegister());
+            jit.store32(jit.scratchRegister(), CCallHelpers::Address(CCallHelpers::stackPointerRegister, 0));
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
+void testPatchpointWithAnyResult()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    PatchpointValue* patchpoint = root->appendNew(proc, Double, Origin());
+    patchpoint->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    patchpoint->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    patchpoint->resultConstraint = ValueRep::WarmAny;
+    patchpoint->clobberLate(RegisterSet::allFPRs());
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    patchpoint->clobber(RegisterSet(GPRInfo::regT0));
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 3);
+            CHECK(params[0].isStack());
+            CHECK(params[1].isGPR());
+            CHECK(params[2].isGPR());
+            add32(jit, params[1].gpr(), params[2].gpr(), GPRInfo::regT0);
+            jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
+            jit.storeDouble(FPRInfo::fpRegT0, CCallHelpers::Address(GPRInfo::callFrameRegister, params[0].offsetFromFP()));
+        });
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    CHECK(compileAndRun(proc, 1, 2) == 3);
+}
+
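+// A Check takes its slow path when the predicate is non-zero: the generator runs and,
+// in these tests, returns 42 directly from the emitted code instead of falling through
+// to the normal Return of 0.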
+void testSimpleCheck()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* check = root->appendNew(proc, Check, Origin(), arg);
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    
+    CHECK(invoke(*code, 0) == 0);
+    CHECK(invoke(*code, 1) == 42);
+}
+
+void testCheckFalse()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(), root->appendNew(proc, Origin(), 0));
+    check->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"This should not have executed");
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    
+    CHECK(invoke(*code) == 0);
+}
+
+void testCheckTrue()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(), root->appendNew(proc, Origin(), 1));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.value()->opcode() == Patchpoint);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    
+    CHECK(invoke(*code) == 42);
+}
+
+void testCheckLessThan()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(),
+        root->appendNew(
+            proc, LessThan, Origin(), arg,
+            root->appendNew(proc, Origin(), 42)));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    
+    CHECK(invoke(*code, 42) == 0);
+    CHECK(invoke(*code, 1000) == 0);
+    CHECK(invoke(*code, 41) == 42);
+    CHECK(invoke(*code, 0) == 42);
+    CHECK(invoke(*code, -1) == 42);
+}
+
+void testCheckMegaCombo()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew(
+        proc, ZExt32, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    Value* ptr = root->appendNew(
+        proc, Add, Origin(), base,
+        root->appendNew(
+            proc, Shl, Origin(), index,
+            root->appendNew(proc, Origin(), 1)));
+    
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(),
+        root->appendNew(
+            proc, LessThan, Origin(),
+            root->appendNew(proc, Load8S, Origin(), ptr),
+            root->appendNew(proc, Origin(), 42)));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    int8_t value;
+    value = 42;
+    CHECK(invoke(*code, &value - 2, 1) == 0);
+    value = 127;
+    CHECK(invoke(*code, &value - 2, 1) == 0);
+    value = 41;
+    CHECK(invoke(*code, &value - 2, 1) == 42);
+    value = 0;
+    CHECK(invoke(*code, &value - 2, 1) == 42);
+    value = -1;
+    CHECK(invoke(*code, &value - 2, 1) == 42);
+}
+
+void testCheckTrickyMegaCombo()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew(
+        proc, ZExt32, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)),
+            root->appendNew(proc, Origin(), 1)));
+
+    Value* ptr = root->appendNew(
+        proc, Add, Origin(), base,
+        root->appendNew(
+            proc, Shl, Origin(), index,
+            root->appendNew(proc, Origin(), 1)));
+    
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(),
+        root->appendNew(
+            proc, LessThan, Origin(),
+            root->appendNew(proc, Load8S, Origin(), ptr),
+            root->appendNew(proc, Origin(), 42)));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    int8_t value;
+    value = 42;
+    CHECK(invoke(*code, &value - 2, 0) == 0);
+    value = 127;
+    CHECK(invoke(*code, &value - 2, 0) == 0);
+    value = 41;
+    CHECK(invoke(*code, &value - 2, 0) == 42);
+    value = 0;
+    CHECK(invoke(*code, &value - 2, 0) == 42);
+    value = -1;
+    CHECK(invoke(*code, &value - 2, 0) == 42);
+}
+
+void testCheckTwoMegaCombos()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew(
+        proc, ZExt32, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    Value* ptr = root->appendNew(
+        proc, Add, Origin(), base,
+        root->appendNew(
+            proc, Shl, Origin(), index,
+            root->appendNew(proc, Origin(), 1)));
+
+    Value* predicate = root->appendNew(
+        proc, LessThan, Origin(),
+        root->appendNew(proc, Load8S, Origin(), ptr),
+        root->appendNew(proc, Origin(), 42));
+    
+    CheckValue* check = root->appendNew(proc, Check, Origin(), predicate);
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    CheckValue* check2 = root->appendNew(proc, Check, Origin(), predicate);
+    check2->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(43), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(), root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+
+    int8_t value;
+    value = 42;
+    CHECK(invoke(*code, &value - 2, 1) == 0);
+    value = 127;
+    CHECK(invoke(*code, &value - 2, 1) == 0);
+    value = 41;
+    CHECK(invoke(*code, &value - 2, 1) == 42);
+    value = 0;
+    CHECK(invoke(*code, &value - 2, 1) == 42);
+    value = -1;
+    CHECK(invoke(*code, &value - 2, 1) == 42);
+}
+
+void testCheckTwoNonRedundantMegaCombos()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    
+    Value* base = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* index = root->appendNew(
+        proc, ZExt32, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    Value* branchPredicate = root->appendNew(
+        proc, BitAnd, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)),
+        root->appendNew(proc, Origin(), 0xff));
+
+    Value* ptr = root->appendNew(
+        proc, Add, Origin(), base,
+        root->appendNew(
+            proc, Shl, Origin(), index,
+            root->appendNew(proc, Origin(), 1)));
+
+    Value* checkPredicate = root->appendNew(
+        proc, LessThan, Origin(),
+        root->appendNew(proc, Load8S, Origin(), ptr),
+        root->appendNew(proc, Origin(), 42));
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(), branchPredicate,
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+    
+    CheckValue* check = thenCase->appendNew(proc, Check, Origin(), checkPredicate);
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(), thenCase->appendNew(proc, Origin(), 43));
+
+    CheckValue* check2 = elseCase->appendNew(proc, Check, Origin(), checkPredicate);
+    check2->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(!params.size());
+
+            // This should always work because a function this simple should never have callee
+            // saves.
+            jit.move(CCallHelpers::TrustedImm32(44), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(), elseCase->appendNew(proc, Origin(), 45));
+
+    auto code = compile(proc);
+
+    int8_t value;
+
+    value = 42;
+    CHECK(invoke(*code, &value - 2, 1, true) == 43);
+    value = 127;
+    CHECK(invoke(*code, &value - 2, 1, true) == 43);
+    value = 41;
+    CHECK(invoke(*code, &value - 2, 1, true) == 42);
+    value = 0;
+    CHECK(invoke(*code, &value - 2, 1, true) == 42);
+    value = -1;
+    CHECK(invoke(*code, &value - 2, 1, true) == 42);
+
+    value = 42;
+    CHECK(invoke(*code, &value - 2, 1, false) == 45);
+    value = 127;
+    CHECK(invoke(*code, &value - 2, 1, false) == 45);
+    value = 41;
+    CHECK(invoke(*code, &value - 2, 1, false) == 44);
+    value = 0;
+    CHECK(invoke(*code, &value - 2, 1, false) == 44);
+    value = -1;
+    CHECK(invoke(*code, &value - 2, 1, false) == 44);
+}
+
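+// CheckAdd performs the addition and invokes the generator on overflow; the generator
+// here redoes the add in double precision and returns that, so the 2147483647 + 42
+// case still yields 2147483689.0.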
+void testCheckAddImm()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), 42);
+    CheckValue* checkAdd = root->appendNew(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->append(arg1);
+    checkAdd->append(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 42);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0) == 42.0);
+    CHECK(invoke(*code, 1) == 43.0);
+    CHECK(invoke(*code, 42) == 84.0);
+    CHECK(invoke(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAddImmCommute()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), 42);
+    CheckValue* checkAdd = root->appendNew(proc, CheckAdd, Origin(), arg2, arg1);
+    checkAdd->append(arg1);
+    checkAdd->append(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 42);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0) == 42.0);
+    CHECK(invoke(*code, 1) == 43.0);
+    CHECK(invoke(*code, 42) == 84.0);
+    CHECK(invoke(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAddImmSomeRegister()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), 42);
+    CheckValue* checkAdd = root->appendNew(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->appendSomeRegister(arg1);
+    checkAdd->appendSomeRegister(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0) == 42.0);
+    CHECK(invoke(*code, 1) == 43.0);
+    CHECK(invoke(*code, 42) == 84.0);
+    CHECK(invoke(*code, 2147483647) == 2147483689.0);
+}
+
+void testCheckAdd()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    CheckValue* checkAdd = root->appendNew(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->appendSomeRegister(arg1);
+    checkAdd->appendSomeRegister(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0, 42) == 42.0);
+    CHECK(invoke(*code, 1, 42) == 43.0);
+    CHECK(invoke(*code, 42, 42) == 84.0);
+    CHECK(invoke(*code, 2147483647, 42) == 2147483689.0);
+}
+
+void testCheckAdd64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->appendSomeRegister(arg1);
+    checkAdd->appendSomeRegister(arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.addDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(proc, IToD, Origin(), checkAdd));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0ll, 42ll) == 42.0);
+    CHECK(invoke<double>(*code, 1ll, 42ll) == 43.0);
+    CHECK(invoke<double>(*code, 42ll, 42ll) == 84.0);
+    CHECK(invoke<double>(*code, 9223372036854775807ll, 42ll) == static_cast<double>(9223372036854775807ll) + 42.0);
+}
+
+void testCheckAddFold(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should have been folded");
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == a + b);
+}
+
+void testCheckAddFoldFail(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Const32Value>(proc, Origin(), a);
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), b);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int>(*code) == 42);
+}
+
+void testCheckAddArgumentAliasing64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg3 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
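+
+    // The shape of this test is meant to let each check's result share a register with one
+    // of its operands; the inputs are small, so no check should ever fire and jit.oops()
+    // must be unreachable.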
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of first arg (here, arg1).
+    CheckValue* checkAdd1 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of second arg (here, arg2).
+    CheckValue* checkAdd2 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg3, arg2);
+    checkAdd2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg3 live.
+    PatchpointValue* keepArg2Live = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkAdd1 and checkAdd2.
+    CheckValue* checkAdd3 = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), checkAdd1, checkAdd2);
+    checkAdd3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd3);
+
+    CHECK(compileAndRun<int64_t>(proc, 1, 2, 3) == 8);
+}
+
+void testCheckAddArgumentAliasing32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg3 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR2));
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of first arg (here, arg1).
+    CheckValue* checkAdd1 = root->appendNew(proc, CheckAdd, Origin(), arg1, arg2);
+    checkAdd1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of second arg (here, arg3).
+    CheckValue* checkAdd2 = root->appendNew(proc, CheckAdd, Origin(), arg2, arg3);
+    checkAdd2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg3 live.
+    PatchpointValue* keepArg2Live = root->appendNew(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkAdd1 and checkAdd2.
+    CheckValue* checkAdd3 = root->appendNew(proc, CheckAdd, Origin(), checkAdd1, checkAdd2);
+    checkAdd3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd3);
+
+    CHECK(compileAndRun(proc, 1, 2, 3) == 8);
+}
+
+void testCheckAddSelfOverflow64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg, arg);
+    checkAdd->append(arg);
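+    // Reporting arg to the stackmap lets the slow path read its original value even though
+    // the same value is both operands of the add.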
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(params[0].gpr(), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+
+    // Make sure the arg is not the destination of the operation.
+    PatchpointValue* opaqueUse = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    opaqueUse->append(ConstrainedValue(arg, ValueRep::SomeRegister));
+    opaqueUse->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int64_t>(*code, 0ll) == 0);
+    CHECK(invoke<int64_t>(*code, 1ll) == 2);
+    CHECK(invoke<int64_t>(*code, std::numeric_limits<int64_t>::max()) == std::numeric_limits<int64_t>::max());
+}
+
+void testCheckAddSelfOverflow32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    CheckValue* checkAdd = root->appendNew<CheckValue>(proc, CheckAdd, Origin(), arg, arg);
+    checkAdd->append(arg);
+    checkAdd->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(params[0].gpr(), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+
+    // Make sure the arg is not the destination of the operation.
+    PatchpointValue* opaqueUse = root->appendNew<PatchpointValue>(proc, Void, Origin());
+    opaqueUse->append(ConstrainedValue(arg, ValueRep::SomeRegister));
+    opaqueUse->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkAdd);
+
+    auto code = compile(proc);
+
+    CHECK(invoke<int32_t>(*code, 0ll) == 0);
+    CHECK(invoke<int32_t>(*code, 1ll) == 2);
+    CHECK(invoke<int32_t>(*code, std::numeric_limits<int32_t>::max()) == std::numeric_limits<int32_t>::max());
+}
+
+void testCheckSubImm()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), 42);
+    CheckValue* checkSub = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 42);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(42), FPRInfo::fpRegT1);
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0) == -42.0);
+    CHECK(invoke(*code, 1) == -41.0);
+    CHECK(invoke(*code, 42) == 0.0);
+    CHECK(invoke(*code, -2147483647) == -2147483689.0);
+}
+
+void testCheckSubBadImm()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    int32_t badImm = std::numeric_limits<int32_t>::min();
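+    // INT32_MIN is a "bad" immediate: it cannot be negated in 32 bits, so the lowering may
+    // not keep it as a constant and can instead hand the generator a register; the slow
+    // path below accepts either representation.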
+    Value* arg2 = root->appendNew<Const32Value>(proc, Origin(), badImm);
+    CheckValue* checkSub = root->appendNew<CheckValue>(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+
+            if (params[1].isConstant()) {
+                CHECK(params[1].value() == badImm);
+                jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(badImm), FPRInfo::fpRegT1);
+            } else {
+                CHECK(params[1].isGPR());
+                jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            }
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0) == -static_cast<double>(badImm));
+    CHECK(invoke<double>(*code, -1) == -static_cast<double>(badImm) - 1);
+    CHECK(invoke<double>(*code, 1) == -static_cast<double>(badImm) + 1);
+    CHECK(invoke<double>(*code, 42) == -static_cast<double>(badImm) + 42);
+}
+
+void testCheckSub()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    CheckValue* checkSub = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0, 42) == -42.0);
+    CHECK(invoke(*code, 1, 42) == -41.0);
+    CHECK(invoke(*code, 42, 42) == 0.0);
+    CHECK(invoke(*code, -2147483647, 42) == -2147483689.0);
+}
+
+NEVER_INLINE double doubleSub(double a, double b)
+{
+    return a - b;
+}
+
+void testCheckSub64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    CheckValue* checkSub = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->append(arg1);
+    checkSub->append(arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.subDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkSub));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0ll, 42ll) == -42.0);
+    CHECK(invoke<double>(*code, 1ll, 42ll) == -41.0);
+    CHECK(invoke<double>(*code, 42ll, 42ll) == 0.0);
+    CHECK(invoke<double>(*code, -9223372036854775807ll, 42ll) == doubleSub(static_cast<double>(-9223372036854775807ll), 42.0));
+}
+
+void testCheckSubFold(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), a);
+    Value* arg2 = root->appendNew(proc, Origin(), b);
+    CheckValue* checkSub = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should have been folded");
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkSub);
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code) == a - b);
+}
+
+void testCheckSubFoldFail(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), a);
+    Value* arg2 = root->appendNew(proc, Origin(), b);
+    CheckValue* checkSub = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkSub->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkSub);
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code) == 42);
+}
+
+void testCheckNeg()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), 0);
+    Value* arg2 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    CheckValue* checkNeg = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkNeg->append(arg2);
+    checkNeg->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 1);
+            CHECK(params[0].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT1);
+            jit.negateDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkNeg));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0) == 0.0);
+    CHECK(invoke(*code, 1) == -1.0);
+    CHECK(invoke(*code, 42) == -42.0);
+    CHECK(invoke(*code, -2147483647 - 1) == 2147483648.0);
+}
+
+void testCheckNeg64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), 0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    CheckValue* checkNeg = root->appendNew(proc, CheckSub, Origin(), arg1, arg2);
+    checkNeg->append(arg2);
+    checkNeg->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 1);
+            CHECK(params[0].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT1);
+            jit.negateDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkNeg));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0ll) == 0.0);
+    CHECK(invoke(*code, 1ll) == -1.0);
+    CHECK(invoke(*code, 42ll) == -42.0);
+    CHECK(invoke(*code, -9223372036854775807ll - 1) == 9223372036854775808.0);
+}
+
+void testCheckMul()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    CheckValue* checkMul = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0, 42) == 0.0);
+    CHECK(invoke(*code, 1, 42) == 42.0);
+    CHECK(invoke(*code, 42, 42) == 42.0 * 42.0);
+    CHECK(invoke(*code, 2147483647, 42) == 2147483647.0 * 42.0);
+}
+
+void testCheckMulMemory()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    int left;
+    int right;
+    
+    Value* arg1 = root->appendNew<MemoryValue>(
+        proc, Load, Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &left));
+    Value* arg2 = root->appendNew<MemoryValue>(
+        proc, Load, Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), &right));
+    CheckValue* checkMul = root->appendNew<CheckValue>(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    left = 0;
+    right = 42;
+    CHECK(invoke(*code) == 0.0);
+    
+    left = 1;
+    right = 42;
+    CHECK(invoke(*code) == 42.0);
+
+    left = 42;
+    right = 42;
+    CHECK(invoke(*code) == 42.0 * 42.0);
+
+    left = 2147483647;
+    right = 42;
+    CHECK(invoke(*code) == 2147483647.0 * 42.0);
+}
+
+void testCheckMul2()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), 2);
+    CheckValue* checkMul = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isConstant());
+            CHECK(params[1].value() == 2);
+            jit.convertInt32ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt32ToDouble(CCallHelpers::TrustedImm32(2), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0) == 0.0);
+    CHECK(invoke(*code, 1) == 2.0);
+    CHECK(invoke(*code, 42) == 42.0 * 2.0);
+    CHECK(invoke(*code, 2147483647) == 2147483647.0 * 2.0);
+}
+
+void testCheckMul64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    CheckValue* checkMul = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke<double>(*code, 0, 42) == 0.0);
+    CHECK(invoke<double>(*code, 1, 42) == 42.0);
+    CHECK(invoke<double>(*code, 42, 42) == 42.0 * 42.0);
+    CHECK(invoke<double>(*code, 9223372036854775807ll, 42) == static_cast<double>(9223372036854775807ll) * 42.0);
+}
+
+void testCheckMulFold(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), a);
+    Value* arg2 = root->appendNew(proc, Origin(), b);
+    CheckValue* checkMul = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should have been folded");
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkMul);
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code) == a * b);
+}
+
+void testCheckMulFoldFail(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), a);
+    Value* arg2 = root->appendNew(proc, Origin(), b);
+    CheckValue* checkMul = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), checkMul);
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code) == 42);
+}
+
+void testCheckMulArgumentAliasing64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* arg3 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of first arg (here, arg1).
+    CheckValue* checkMul1 = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of second arg (here, arg2).
+    CheckValue* checkMul2 = root->appendNew(proc, CheckMul, Origin(), arg3, arg2);
+    checkMul2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg3 live.
+    PatchpointValue* keepArg2Live = root->appendNew(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkMul1 and checkMul2.
+    CheckValue* checkMul3 = root->appendNew(proc, CheckMul, Origin(), checkMul1, checkMul2);
+    checkMul3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkMul3);
+
+    CHECK(compileAndRun(proc, 2, 3, 4) == 72);
+}
+
+void testCheckMulArgumentAliasing32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* arg3 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR2));
+
+    // Pretend to use all the args.
+    PatchpointValue* useArgs = root->appendNew(proc, Void, Origin());
+    useArgs->append(ConstrainedValue(arg1, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    useArgs->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+    useArgs->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Last use of first arg (here, arg1).
+    CheckValue* checkMul1 = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul1->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Last use of second arg (here, arg3).
+    CheckValue* checkMul2 = root->appendNew(proc, CheckMul, Origin(), arg2, arg3);
+    checkMul2->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    // Keep arg3 live.
+    PatchpointValue* keepArg2Live = root->appendNew(proc, Void, Origin());
+    keepArg2Live->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+    keepArg2Live->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+    // Only use of checkMul1 and checkMul2.
+    CheckValue* checkMul3 = root->appendNew(proc, CheckMul, Origin(), checkMul1, checkMul2);
+    checkMul3->setGenerator([&] (CCallHelpers& jit, const StackmapGenerationParams&) { jit.oops(); });
+
+    root->appendNewControlValue(proc, Return, Origin(), checkMul3);
+
+    CHECK(compileAndRun(proc, 2, 3, 4) == 72);
+}
+
+void testCheckMul64SShr()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(
+        proc, SShr, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(proc, Origin(), 1));
+    Value* arg2 = root->appendNew(
+        proc, SShr, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+        root->appendNew(proc, Origin(), 1));
+    CheckValue* checkMul = root->appendNew(proc, CheckMul, Origin(), arg1, arg2);
+    checkMul->append(arg1);
+    checkMul->append(arg2);
+    checkMul->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params.size() == 2);
+            CHECK(params[0].isGPR());
+            CHECK(params[1].isGPR());
+            jit.convertInt64ToDouble(params[0].gpr(), FPRInfo::fpRegT0);
+            jit.convertInt64ToDouble(params[1].gpr(), FPRInfo::fpRegT1);
+            jit.mulDouble(FPRInfo::fpRegT1, FPRInfo::fpRegT0);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, IToD, Origin(), checkMul));
+
+    auto code = compile(proc);
+
+    CHECK(invoke(*code, 0ll, 42ll) == 0.0);
+    CHECK(invoke(*code, 1ll, 42ll) == 0.0);
+    CHECK(invoke(*code, 42ll, 42ll) == (42.0 / 2.0) * (42.0 / 2.0));
+    CHECK(invoke(*code, 10000000000ll, 10000000000ll) == 25000000000000000000.0);
+}
+
+template<typename LeftFunctor, typename RightFunctor, typename InputType>
+void genericTestCompare(
+    B3::Opcode opcode, const LeftFunctor& leftFunctor, const RightFunctor& rightFunctor,
+    InputType left, InputType right, int result)
+{
+    // Using a compare.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* leftValue = leftFunctor(root, proc);
+        Value* rightValue = rightFunctor(root, proc);
+        Value* comparisonResult = root->appendNew<Value>(proc, opcode, Origin(), leftValue, rightValue);
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, NotEqual, Origin(),
+                comparisonResult,
+                root->appendIntConstant(proc, Origin(), comparisonResult->type(), 0)));
+
+        CHECK(compileAndRun<int>(proc, left, right) == result);
+    }
+    
+    // Using a branch.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        BasicBlock* thenCase = proc.addBlock();
+        BasicBlock* elseCase = proc.addBlock();
+
+        Value* leftValue = leftFunctor(root, proc);
+        Value* rightValue = rightFunctor(root, proc);
+
+        root->appendNewControlValue(
+            proc, Branch, Origin(),
+            root->appendNew<Value>(proc, opcode, Origin(), leftValue, rightValue),
+            FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+        // We use a patchpoint on the then case to ensure that this doesn't get if-converted.
+        PatchpointValue* patchpoint = thenCase->appendNew<PatchpointValue>(proc, Int32, Origin());
+        patchpoint->setGenerator(
+            [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                AllowMacroScratchRegisterUsage allowScratch(jit);
+                CHECK(params.size() == 1);
+                CHECK(params[0].isGPR());
+                jit.move(CCallHelpers::TrustedImm32(1), params[0].gpr());
+            });
+        thenCase->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+        elseCase->appendNewControlValue(
+            proc, Return, Origin(),
+            elseCase->appendNew<Const32Value>(proc, Origin(), 0));
+
+        CHECK(compileAndRun<int>(proc, left, right) == result);
+    }
+}
+
+template<typename InputType>
+InputType modelCompare(B3::Opcode opcode, InputType left, InputType right)
+{
+    switch (opcode) {
+    case Equal:
+        return left == right;
+    case NotEqual:
+        return left != right;
+    case LessThan:
+        return left < right;
+    case GreaterThan:
+        return left > right;
+    case LessEqual:
+        return left <= right;
+    case GreaterEqual:
+        return left >= right;
+    case Above:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) >
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case Below:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) <
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case AboveEqual:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) >=
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case BelowEqual:
+        return static_cast<typename std::make_unsigned<InputType>::type>(left) <=
+            static_cast<typename std::make_unsigned<InputType>::type>(right);
+    case BitAnd:
+        return !!(left & right);
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
+template<typename T>
+void testCompareLoad(B3::Opcode opcode, B3::Opcode loadOpcode, int left, int right)
+{
+    int result = modelCompare(opcode, modelLoad<T>(left), right);
+    
+    // Test addr-to-tmp
+    int slot = left;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        },
+        left, right, result);
+
+    // Test addr-to-imm
+    slot = left;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), right);
+        },
+        left, right, result);
+
+    result = modelCompare(opcode, left, modelLoad<T>(right));
+    
+    // Test tmp-to-addr
+    slot = right;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Value>(
+                proc, Trunc, Origin(),
+                block->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        left, right, result);
+
+    // Test imm-to-addr
+    slot = right;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<Const32Value>(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+        },
+        left, right, result);
+
+    // Test addr-to-addr, with the same addr.
+    slot = left;
+    Value* value;
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            value = block->appendNew<MemoryValue>(
+                proc, loadOpcode, Int32, Origin(),
+                block->appendNew<ConstPtrValue>(proc, Origin(), &slot));
+            return value;
+        },
+        [&] (BasicBlock*, Procedure&) {
+            return value;
+        },
+        left, left, modelCompare(opcode, modelLoad<T>(left), modelLoad<T>(left)));
+}
+
+void testCompareImpl(B3::Opcode opcode, int64_t left, int64_t right)
+{
+    int64_t result = modelCompare(opcode, left, right);
+    int32_t int32Result = modelCompare(opcode, static_cast<int32_t>(left), static_cast<int32_t>(right));
+    
+    // Test tmp-to-tmp.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(
+                proc, Trunc, Origin(),
+                block->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(
+                proc, Trunc, Origin(),
+                block->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+        },
+        left, right, int32Result);
+
+    // Test imm-to-tmp.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(
+                proc, Trunc, Origin(),
+                block->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+        },
+        left, right, int32Result);
+
+    // Test tmp-to-imm.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), right);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(
+                proc, Trunc, Origin(),
+                block->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), right);
+        },
+        left, right, int32Result);
+
+    // Test imm-to-imm.
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), right);
+        },
+        left, right, result);
+    genericTestCompare(
+        opcode,
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), left);
+        },
+        [&] (BasicBlock* block, Procedure& proc) {
+            return block->appendNew(proc, Origin(), right);
+        },
+        left, right, int32Result);
+
+    testCompareLoad<int32_t>(opcode, Load, left, right);
+    testCompareLoad<int8_t>(opcode, Load8S, left, right);
+    testCompareLoad<uint8_t>(opcode, Load8Z, left, right);
+    testCompareLoad<int16_t>(opcode, Load16S, left, right);
+    testCompareLoad<uint16_t>(opcode, Load16Z, left, right);
+}
+
+void testCompare(B3::Opcode opcode, int64_t left, int64_t right)
+{
+    testCompareImpl(opcode, left, right);
+    testCompareImpl(opcode, left, right + 1);
+    testCompareImpl(opcode, left, right - 1);
+}
+
+void testEqualDouble(double left, double right, bool result)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Equal, Origin(),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    CHECK(compileAndRun(proc, left, right) == result);
+}
+
+int simpleFunction(int a, int b)
+{
+    return a + b;
+}
+
+void testCallSimple(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Int32, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunction)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun<int>(proc, a, b) == a + b);
+}
+
+void testCallRare(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* common = proc.addBlock();
+    BasicBlock* rare = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        FrequentedBlock(rare, FrequencyClass::Rare),
+        FrequentedBlock(common));
+
+    common->appendNewControlValue(
+        proc, Return, Origin(), common->appendNew(proc, Origin(), 0));
+    
+    rare->appendNewControlValue(
+        proc, Return, Origin(),
+        rare->appendNew(
+            proc, Int32, Origin(),
+            rare->appendNew(proc, Origin(), bitwise_cast(simpleFunction)),
+            rare->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            rare->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    CHECK(compileAndRun(proc, true, a, b) == a + b);
+}
+
+void testCallRareLive(int a, int b, int c)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* common = proc.addBlock();
+    BasicBlock* rare = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        FrequentedBlock(rare, FrequencyClass::Rare),
+        FrequentedBlock(common));
+
+    common->appendNewControlValue(
+        proc, Return, Origin(), common->appendNew(proc, Origin(), 0));
+    
+    rare->appendNewControlValue(
+        proc, Return, Origin(),
+        rare->appendNew(
+            proc, Add, Origin(),
+            rare->appendNew(
+                proc, Int32, Origin(),
+                rare->appendNew(proc, Origin(), bitwise_cast(simpleFunction)),
+                rare->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                rare->appendNew(proc, Origin(), GPRInfo::argumentGPR2)),
+            rare->appendNew(
+                proc, Trunc, Origin(),
+                rare->appendNew(proc, Origin(), GPRInfo::argumentGPR3))));
+
+    CHECK(compileAndRun(proc, true, a, b, c) == a + b + c);
+}
+
+void testCallSimplePure(int a, int b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Int32, Origin(), Effects::none(),
+            root->appendNew(proc, Origin(), bitwise_cast(simpleFunction)),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    CHECK(compileAndRun(proc, a, b) == a + b);
+}
+
+int functionWithHellaArguments(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m, int n, int o, int p, int q, int r, int s, int t, int u, int v, int w, int x, int y, int z)
+{
+    return (a << 0) + (b << 1) + (c << 2) + (d << 3) + (e << 4) + (f << 5) + (g << 6) + (h << 7) + (i << 8) + (j << 9) + (k << 10) + (l << 11) + (m << 12) + (n << 13) + (o << 14) + (p << 15) + (q << 16) + (r << 17) + (s << 18) + (t << 19) + (u << 20) + (v << 21) + (w << 22) + (x << 23) + (y << 24) + (z << 25);
+}
+
+void testCallFunctionWithHellaArguments()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> args;
+    for (unsigned i = 0; i < 26; ++i)
+        args.append(root->appendNew<Const32Value>(proc, Origin(), i + 1));
+
+    CCallValue* call = root->appendNew<CCallValue>(
+        proc, Int32, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaArguments)));
+    call->children().appendVector(args);
+    
+    root->appendNewControlValue(proc, Return, Origin(), call);
+
+    CHECK(compileAndRun<int>(proc) == functionWithHellaArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+void testReturnDouble(double value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<ConstDoubleValue>(proc, Origin(), value));
+
+    CHECK(isIdentical(compileAndRun<double>(proc), value));
+}
+
+void testReturnFloat(float value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<ConstFloatValue>(proc, Origin(), value));
+
+    CHECK(isIdentical(compileAndRun<float>(proc), value));
+}
+
+double simpleFunctionDouble(double a, double b)
+{
+    return a + b;
+}
+
+void testCallSimpleDouble(double a, double b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Double, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunctionDouble)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    CHECK(compileAndRun<double>(proc, a, b) == a + b);
+}
+
+float simpleFunctionFloat(float a, float b)
+{
+    return a + b;
+}
+
+void testCallSimpleFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<CCallValue>(
+            proc, Float, Origin(),
+            root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(simpleFunctionFloat)),
+            floatValue1,
+            floatValue2));
+
+    CHECK(isIdentical(compileAndRun<float>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b)), a + b));
+}
+
+double functionWithHellaDoubleArguments(double a, double b, double c, double d, double e, double f, double g, double h, double i, double j, double k, double l, double m, double n, double o, double p, double q, double r, double s, double t, double u, double v, double w, double x, double y, double z)
+{
+    return a * pow(2, 0) + b * pow(2, 1) + c * pow(2, 2) + d * pow(2, 3) + e * pow(2, 4) + f * pow(2, 5) + g * pow(2, 6) + h * pow(2, 7) + i * pow(2, 8) + j * pow(2, 9) + k * pow(2, 10) + l * pow(2, 11) + m * pow(2, 12) + n * pow(2, 13) + o * pow(2, 14) + p * pow(2, 15) + q * pow(2, 16) + r * pow(2, 17) + s * pow(2, 18) + t * pow(2, 19) + u * pow(2, 20) + v * pow(2, 21) + w * pow(2, 22) + x * pow(2, 23) + y * pow(2, 24) + z * pow(2, 25);
+}
+
+void testCallFunctionWithHellaDoubleArguments()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> args;
+    for (unsigned i = 0; i < 26; ++i)
+        args.append(root->appendNew<ConstDoubleValue>(proc, Origin(), i + 1));
+
+    CCallValue* call = root->appendNew<CCallValue>(
+        proc, Double, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaDoubleArguments)));
+    call->children().appendVector(args);
+    
+    root->appendNewControlValue(proc, Return, Origin(), call);
+
+    CHECK(compileAndRun<double>(proc) == functionWithHellaDoubleArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+float functionWithHellaFloatArguments(float a, float b, float c, float d, float e, float f, float g, float h, float i, float j, float k, float l, float m, float n, float o, float p, float q, float r, float s, float t, float u, float v, float w, float x, float y, float z)
+{
+    return a * pow(2, 0) + b * pow(2, 1) + c * pow(2, 2) + d * pow(2, 3) + e * pow(2, 4) + f * pow(2, 5) + g * pow(2, 6) + h * pow(2, 7) + i * pow(2, 8) + j * pow(2, 9) + k * pow(2, 10) + l * pow(2, 11) + m * pow(2, 12) + n * pow(2, 13) + o * pow(2, 14) + p * pow(2, 15) + q * pow(2, 16) + r * pow(2, 17) + s * pow(2, 18) + t * pow(2, 19) + u * pow(2, 20) + v * pow(2, 21) + w * pow(2, 22) + x * pow(2, 23) + y * pow(2, 24) + z * pow(2, 25);
+}
+
+void testCallFunctionWithHellaFloatArguments()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector<Value*> args;
+    for (unsigned i = 0; i < 26; ++i)
+        args.append(root->appendNew<ConstFloatValue>(proc, Origin(), i + 1));
+
+    CCallValue* call = root->appendNew<CCallValue>(
+        proc, Float, Origin(),
+        root->appendNew<ConstPtrValue>(proc, Origin(), bitwise_cast<void*>(functionWithHellaFloatArguments)));
+    call->children().appendVector(args);
+    
+    root->appendNewControlValue(proc, Return, Origin(), call);
+
+    CHECK(compileAndRun<float>(proc) == functionWithHellaFloatArguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26));
+}
+
+void testChillDiv(int num, int den, int res)
+{
+    // Test non-constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1))));
+
+        CHECK(compileAndRun<int>(proc, num, den) == res);
+    }
+
+    // Test constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Const32Value>(proc, Origin(), num),
+                root->appendNew<Const32Value>(proc, Origin(), den)));
+        
+        CHECK(compileAndRun<int>(proc) == res);
+    }
+}
+
+void testChillDivTwice(int num1, int den1, int num2, int den2, int res)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, chill(Div), Origin(),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR1))),
+            root->appendNew(
+                proc, chill(Div), Origin(),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR3)))));
+    
+    CHECK(compileAndRun(proc, num1, den1, num2, den2) == res);
+}
+
+void testChillDiv64(int64_t num, int64_t den, int64_t res)
+{
+    if (!is64Bit())
+        return;
+
+    // Test non-constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1)));
+
+        CHECK(compileAndRun<int64_t>(proc, num, den) == res);
+    }
+
+    // Test constant.
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew<Value>(
+                proc, chill(Div), Origin(),
+                root->appendNew<Const64Value>(proc, Origin(), num),
+                root->appendNew<Const64Value>(proc, Origin(), den)));
+
+        CHECK(compileAndRun<int64_t>(proc) == res);
+    }
+}
+
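+// Plain Mod has the same undefined cases as C++ '%' (x % 0 and INT_MIN % -1), so the non-chill
+// Mod tests below skip those inputs; the chill(Mod) tests further down do not need to.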
+void testModArg(int64_t value)
+{
+    if (!value)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int64_t>(proc, value));
+}
+
+void testModArgs(int64_t numerator, int64_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int64_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModImms(int64_t numerator, int64_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int64_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const64Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const64Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModArg32(int32_t value)
+{
+    if (!value)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int32_t>(proc, value));
+}
+
+void testModArgs32(int32_t numerator, int32_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int32_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testModImms32(int32_t numerator, int32_t denominator)
+{
+    if (!denominator)
+        return;
+    if (numerator == std::numeric_limits<int32_t>::min() && denominator == -1)
+        return;
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const32Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const32Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, Mod, Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == numerator % denominator);
+}
+
+void testChillModArg(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int64_t>(proc, value));
+}
+
+void testChillModArgs(int64_t numerator, int64_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModImms(int64_t numerator, int64_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const64Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const64Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int64_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModArg32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument, argument);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(!compileAndRun<int32_t>(proc, value));
+}
+
+void testChillModArgs32(int32_t numerator, int32_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
+void testChillModImms32(int32_t numerator, int32_t denominator)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* argument1 = root->appendNew<Const32Value>(proc, Origin(), numerator);
+    Value* argument2 = root->appendNew<Const32Value>(proc, Origin(), denominator);
+    Value* result = root->appendNew<Value>(proc, chill(Mod), Origin(), argument1, argument2);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    CHECK(compileAndRun<int32_t>(proc, numerator, denominator) == chillMod(numerator, denominator));
+}
+
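+// Builds a switch with `degree` cases spaced `gap` apart, each returning one of two arguments,
+// and checks both the matching case values and the fall-through inputs between and outside them.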
+void testSwitch(unsigned degree, unsigned gap = 1)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 0));
+
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(
+        proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    for (unsigned i = 0; i < degree; ++i) {
+        BasicBlock* newBlock = proc.addBlock();
+        newBlock->appendNewControlValue(
+            proc, Return, Origin(),
+            newBlock->appendNew<ArgumentRegValue>(
+                proc, Origin(), (i & 1) ? GPRInfo::argumentGPR2 : GPRInfo::argumentGPR1));
+        switchValue->appendCase(SwitchCase(gap * i, FrequentedBlock(newBlock)));
+    }
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < degree; ++i) {
+        CHECK(invoke<int32_t>(*code, i * gap, 42, 11) == ((i & 1) ? 11 : 42));
+        if (gap > 1) {
+            CHECK(!invoke<int32_t>(*code, i * gap + 1, 42, 11));
+            CHECK(!invoke<int32_t>(*code, i * gap - 1, 42, 11));
+        }
+    }
+
+    CHECK(!invoke<int32_t>(*code, -1, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap + 1, 42, 11));
+}
+
+void testSwitchChillDiv(unsigned degree, unsigned gap = 1)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* left = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* right = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2);
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 0));
+
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(
+        proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    for (unsigned i = 0; i < degree; ++i) {
+        BasicBlock* newBlock = proc.addBlock();
+
+        newBlock->appendNewControlValue(
+            proc, Return, Origin(),
+            newBlock->appendNew<Value>(
+                proc, chill(Div), Origin(), (i & 1) ? right : left, (i & 1) ? left : right));
+        
+        switchValue->appendCase(SwitchCase(gap * i, FrequentedBlock(newBlock)));
+    }
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < degree; ++i) {
+        dataLog("i = ", i, "\n");
+        int32_t result = invoke<int32_t>(*code, i * gap, 42, 11);
+        dataLog("result = ", result, "\n");
+        CHECK(result == ((i & 1) ? 11/42 : 42/11));
+        if (gap > 1) {
+            CHECK(!invoke<int32_t>(*code, i * gap + 1, 42, 11));
+            CHECK(!invoke<int32_t>(*code, i * gap - 1, 42, 11));
+        }
+    }
+
+    CHECK(!invoke<int32_t>(*code, -1, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap, 42, 11));
+    CHECK(!invoke<int32_t>(*code, degree * gap + 1, 42, 11));
+}
+
+void testSwitchTargettingSameBlock()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 5));
+
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(
+        proc, Origin(), root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    BasicBlock* otherTarget = proc.addBlock();
+    otherTarget->appendNewControlValue(
+        proc, Return, Origin(),
+        otherTarget->appendNew<Const32Value>(proc, Origin(), 42));
+    switchValue->appendCase(SwitchCase(3, FrequentedBlock(otherTarget)));
+    switchValue->appendCase(SwitchCase(13, FrequentedBlock(otherTarget)));
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < 20; ++i) {
+        int32_t expected = (i == 3 || i == 13) ? 42 : 5;
+        CHECK(invoke<int32_t>(*code, i) == expected);
+    }
+}
+
+void testSwitchTargettingSameBlockFoldPathConstant()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    BasicBlock* terminate = proc.addBlock();
+    terminate->appendNewControlValue(
+        proc, Return, Origin(),
+        terminate->appendNew<Const32Value>(proc, Origin(), 42));
+
+    Value* argument = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    SwitchValue* switchValue = root->appendNew<SwitchValue>(proc, Origin(), argument);
+    switchValue->setFallThrough(FrequentedBlock(terminate));
+
+    BasicBlock* otherTarget = proc.addBlock();
+    otherTarget->appendNewControlValue(
+        proc, Return, Origin(), argument);
+    switchValue->appendCase(SwitchCase(3, FrequentedBlock(otherTarget)));
+    switchValue->appendCase(SwitchCase(13, FrequentedBlock(otherTarget)));
+
+    auto code = compile(proc);
+
+    for (unsigned i = 0; i < 20; ++i) {
+        int32_t expected = (i == 3 || i == 13) ? i : 42;
+        CHECK(invoke<int32_t>(*code, i) == expected);
+    }
+}
+
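+// The *Fold variants below feed constants into Trunc/ZExt/SExt so the extension can be
+// constant-folded at compile time; the CHECKs compare against the equivalent C++ casts either way.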
+void testTruncFold(int64_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Const64Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(value));
+}
+
+void testZExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZExt32, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<uint64_t>(proc, value) == static_cast<uint64_t>(static_cast<uint32_t>(value)));
+}
+
+void testZExt32Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, ZExt32, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<uint64_t>(proc, value) == static_cast<uint64_t>(static_cast<uint32_t>(value)));
+}
+
+void testSExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt32, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value));
+}
+
+void testSExt32Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt32, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value));
+}
+
+void testTruncZExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Value>(
+                proc, ZExt32, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == value);
+}
+
+void testTruncSExt32(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Trunc, Origin(),
+            root->appendNew<Value>(
+                proc, SExt32, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == value);
+}
+
+void testSExt8(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8SExt8(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, SExt8, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8SExt16(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, SExt16, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt8BitAnd(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt8, Origin(),
+            root->appendNew<Value>(
+                proc, BitAnd, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value & mask)));
+}
+
+void testBitAndSExt8(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, SExt8, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+            root->appendNew<Const32Value>(proc, Origin(), mask)));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == (static_cast<int32_t>(static_cast<int8_t>(value)) & mask));
+}
+
+void testSExt16(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, Trunc, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16Fold(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Const32Value>(proc, Origin(), value)));
+
+    CHECK(compileAndRun<int32_t>(proc) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16SExt16(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, SExt16, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value)));
+}
+
+void testSExt16SExt8(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, SExt8, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int8_t>(value)));
+}
+
+void testSExt16BitAnd(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt16, Origin(),
+            root->appendNew<Value>(
+                proc, BitAnd, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<int32_t>(static_cast<int16_t>(value & mask)));
+}
+
+void testBitAndSExt16(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, SExt16, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+            root->appendNew<Const32Value>(proc, Origin(), mask)));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == (static_cast<int32_t>(static_cast<int16_t>(value)) & mask));
+}
+
+void testSExt32BitAnd(int32_t value, int32_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, SExt32, Origin(),
+            root->appendNew<Value>(
+                proc, BitAnd, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew<Const32Value>(proc, Origin(), mask))));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == static_cast<int64_t>(value & mask));
+}
+
+void testBitAndSExt32(int32_t value, int64_t mask)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, BitAnd, Origin(),
+            root->appendNew<Value>(
+                proc, SExt32, Origin(),
+                root->appendNew<Value>(
+                    proc, Trunc, Origin(),
+                    root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0))),
+            root->appendNew<Const64Value>(proc, Origin(), mask)));
+
+    CHECK(compileAndRun<int64_t>(proc, value) == (static_cast<int64_t>(value) & mask));
+}
+
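+// Select(predicate, thenCase, elseCase) yields thenCase when the predicate is non-zero and
+// elseCase otherwise; the tests below cover integer, float, and double selections.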
+void testBasicSelect()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, Equal, Origin(),
+                root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew<ConstPtrValue>(proc, Origin(), 42)),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    auto code = compile(proc);
+    CHECK(invoke<intptr_t>(*code, 42, 1, 2) == 1);
+    CHECK(invoke<intptr_t>(*code, 42, 642462, 32533) == 642462);
+    CHECK(invoke<intptr_t>(*code, 43, 1, 2) == 2);
+    CHECK(invoke<intptr_t>(*code, 43, 642462, 32533) == 32533);
+}
+
+void testSelectTest()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42, 1, 2) == 1);
+    CHECK(invoke(*code, 42, 642462, 32533) == 642462);
+    CHECK(invoke(*code, 0, 1, 2) == 2);
+    CHECK(invoke(*code, 0, 642462, 32533) == 32533);
+}
+
+void testSelectCompareDouble()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, LessThan, Origin(),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR0),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR1)),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, -1.0, 1.0, 1, 2) == 1);
+    CHECK(invoke(*code, 42.5, 42.51, 642462, 32533) == 642462);
+    CHECK(invoke(*code, PNaN, 0.0, 1, 2) == 2);
+    CHECK(invoke(*code, 42.51, 42.5, 642462, 32533) == 32533);
+    CHECK(invoke(*code, 42.52, 42.52, 524978245, 352) == 352);
+}
+
+template<B3::Opcode opcode>
+void testSelectCompareFloat(float a, float b, bool (*operation)(float, float))
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, opcode, Origin(),
+                floatValue1,
+                floatValue2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42, -5), operation(a, b) ? 42 : -5));
+}
+
+void testSelectCompareFloat(float a, float b)
+{
+    testSelectCompareFloat<Equal>(a, b, [](float a, float b) -> bool { return a == b; });
+    testSelectCompareFloat<NotEqual>(a, b, [](float a, float b) -> bool { return a != b; });
+    testSelectCompareFloat<LessThan>(a, b, [](float a, float b) -> bool { return a < b; });
+    testSelectCompareFloat<GreaterThan>(a, b, [](float a, float b) -> bool { return a > b; });
+    testSelectCompareFloat<LessEqual>(a, b, [](float a, float b) -> bool { return a <= b; });
+    testSelectCompareFloat<GreaterEqual>(a, b, [](float a, float b) -> bool { return a >= b; });
+    testSelectCompareFloat<EqualOrUnordered>(a, b, [](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+template<B3::Opcode opcode>
+void testSelectCompareFloatToDouble(float a, float b, bool (*operation)(float, float))
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew<Value>(proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew<Value>(proc, BitwiseCast, Origin(), argument2int32);
+    Value* doubleValue1 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue1);
+    Value* doubleValue2 = root->appendNew<Value>(proc, FloatToDouble, Origin(), floatValue2);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew<Value>(
+            proc, Select, Origin(),
+            root->appendNew<Value>(
+                proc, opcode, Origin(),
+                doubleValue1,
+                doubleValue2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR2),
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR3)));
+    CHECK(isIdentical(compileAndRun<int32_t>(proc, bitwise_cast<int32_t>(a), bitwise_cast<int32_t>(b), 42, -5), operation(a, b) ? 42 : -5));
+}
+
+void testSelectCompareFloatToDouble(float a, float b)
+{
+    testSelectCompareFloatToDouble<Equal>(a, b, [](float a, float b) -> bool { return a == b; });
+    testSelectCompareFloatToDouble<NotEqual>(a, b, [](float a, float b) -> bool { return a != b; });
+    testSelectCompareFloatToDouble<LessThan>(a, b, [](float a, float b) -> bool { return a < b; });
+    testSelectCompareFloatToDouble<GreaterThan>(a, b, [](float a, float b) -> bool { return a > b; });
+    testSelectCompareFloatToDouble<LessEqual>(a, b, [](float a, float b) -> bool { return a <= b; });
+    testSelectCompareFloatToDouble<GreaterEqual>(a, b, [](float a, float b) -> bool { return a >= b; });
+    testSelectCompareFloatToDouble<EqualOrUnordered>(a, b, [](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+void testSelectDouble()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, Equal, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(proc, Origin(), 42)),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42, 1.5, 2.6) == 1.5);
+    CHECK(invoke(*code, 42, 642462.7, 32533.8) == 642462.7);
+    CHECK(invoke(*code, 43, 1.9, 2.0) == 2.0);
+    CHECK(invoke(*code, 43, 642462.1, 32533.2) == 32533.2);
+}
+
+void testSelectDoubleTest()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42, 1.5, 2.6) == 1.5);
+    CHECK(invoke(*code, 42, 642462.7, 32533.8) == 642462.7);
+    CHECK(invoke(*code, 0, 1.9, 2.0) == 2.0);
+    CHECK(invoke(*code, 0, 642462.1, 32533.2) == 32533.2);
+}
+
+void testSelectDoubleCompareDouble()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, LessThan, Origin(),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR0),
+                root->appendNew(proc, Origin(), FPRInfo::argumentFPR1)),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR2),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR3)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, -1.0, 1.0, 1.1, 2.2) == 1.1);
+    CHECK(invoke(*code, 42.5, 42.51, 642462.3, 32533.4) == 642462.3);
+    CHECK(invoke(*code, PNaN, 0.0, 1.5, 2.6) == 2.6);
+    CHECK(invoke(*code, 42.51, 42.5, 642462.7, 32533.8) == 32533.8);
+    CHECK(invoke(*code, 42.52, 42.52, 524978245.9, 352.0) == 352.0);
+}
+
+void testSelectDoubleCompareFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, LessThan, Origin(),
+                floatValue1,
+                floatValue2),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR0),
+            root->appendNew(proc, Origin(), FPRInfo::argumentFPR1)));
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b), 42.1, -M_PI), a < b ? 42.1 : -M_PI));
+}
+
+void testSelectFloatCompareFloat(float a, float b)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* argument1int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* argument2int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* argument3int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR2));
+    Value* argument4int32 = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR3));
+    Value* floatValue1 = root->appendNew(proc, BitwiseCast, Origin(), argument1int32);
+    Value* floatValue2 = root->appendNew(proc, BitwiseCast, Origin(), argument2int32);
+    Value* floatValue3 = root->appendNew(proc, BitwiseCast, Origin(), argument3int32);
+    Value* floatValue4 = root->appendNew(proc, BitwiseCast, Origin(), argument4int32);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, LessThan, Origin(),
+                floatValue1,
+                floatValue2),
+            floatValue3,
+            floatValue4));
+
+    CHECK(isIdentical(compileAndRun(proc, bitwise_cast(a), bitwise_cast(b), bitwise_cast(1.1f), bitwise_cast(-42.f)), a < b ? 1.1f : -42.f));
+}
+
+
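+// The templated aliasing variants below run one comparison opcode over every pair from
+// floatingPointOperands() and use a Void patchpoint with an empty generator to keep the
+// thenCase/elseCase values live, so the register allocator has to cope with select operands
+// that alias the compare operands or the result.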
+template<B3::Opcode opcode>
+void testSelectDoubleCompareDouble(bool (*operation)(double, double))
+{
+    { // Compare arguments and selected arguments are all different.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR3);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg3));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR3);
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "elseCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR3);
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. Both cases are live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR2);
+        Value* arg3 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR3);
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                double expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, left.value, right.value, 42.5, -66.5), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR2);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg0));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                double expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke(*code, left.value, right.value, 42.5, left.value), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+        Value* arg1 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR1);
+        Value* arg2 = root->appendNew(proc, Origin(), FPRInfo::argumentFPR2);
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg0);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                double expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke(*code, left.value, right.value, 42.5, left.value), expected));
+            }
+        }
+    }
+}
+
+void testSelectDoubleCompareDoubleWithAliasing()
+{
+    testSelectDoubleCompareDouble<Equal>([](double a, double b) -> bool { return a == b; });
+    testSelectDoubleCompareDouble<NotEqual>([](double a, double b) -> bool { return a != b; });
+    testSelectDoubleCompareDouble<LessThan>([](double a, double b) -> bool { return a < b; });
+    testSelectDoubleCompareDouble<GreaterThan>([](double a, double b) -> bool { return a > b; });
+    testSelectDoubleCompareDouble<LessEqual>([](double a, double b) -> bool { return a <= b; });
+    testSelectDoubleCompareDouble<GreaterEqual>([](double a, double b) -> bool { return a >= b; });
+    testSelectDoubleCompareDouble<EqualOrUnordered>([](double a, double b) -> bool { return a != a || b != b || a == b; });
+}
+
+template<B3::Opcode opcode>
+void testSelectFloatCompareFloat(bool (*operation)(float, float))
+{
+    { // Compare arguments and selected arguments are all different.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* arg0 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg3));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, bitwise_cast(left.value), bitwise_cast(right.value), bitwise_cast(42.5f), bitwise_cast(-66.5f)), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, bitwise_cast(left.value), bitwise_cast(right.value), bitwise_cast(42.5f), bitwise_cast(-66.5f)), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. "elseCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, bitwise_cast(left.value), bitwise_cast(right.value), bitwise_cast(42.5f), bitwise_cast(-66.5f)), expected));
+            }
+        }
+    }
+    { // Compare arguments and selected arguments are all different. Both cases are live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+        Value* arg3 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR3)));
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg3);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->append(ConstrainedValue(arg3, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                float expected = operation(left.value, right.value) ? 42.5 : -66.5;
+                CHECK(isIdentical(invoke(*code, bitwise_cast(left.value), bitwise_cast(right.value), bitwise_cast(42.5f), bitwise_cast(-66.5f)), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+
+        root->appendNewControlValue(
+            proc, Return, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, opcode, Origin(),
+                    arg0,
+                    arg1),
+                arg2,
+                arg0));
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                float expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke(*code, bitwise_cast(left.value), bitwise_cast(right.value), bitwise_cast(42.5f), bitwise_cast(left.value)), expected));
+            }
+        }
+    }
+    { // The left argument is the same as the "elseCase" argument. "thenCase" is live after operation.
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* arg0 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+        Value* arg1 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+        Value* arg2 = root->appendNew(proc, BitwiseCast, Origin(),
+            root->appendNew(proc, Trunc, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+
+        Value* result = root->appendNew(proc, Select, Origin(),
+            root->appendNew(proc, opcode, Origin(), arg0, arg1),
+            arg2,
+            arg0);
+
+        PatchpointValue* keepValuesLive = root->appendNew(proc, Void, Origin());
+        keepValuesLive->append(ConstrainedValue(arg2, ValueRep::SomeRegister));
+        keepValuesLive->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+
+        root->appendNewControlValue(proc, Return, Origin(), result);
+        auto code = compile(proc);
+
+        for (auto& left : floatingPointOperands()) {
+            for (auto& right : floatingPointOperands()) {
+                float expected = operation(left.value, right.value) ? 42.5 : left.value;
+                CHECK(isIdentical(invoke(*code, bitwise_cast(left.value), bitwise_cast(right.value), bitwise_cast(42.5f), bitwise_cast(left.value)), expected));
+            }
+        }
+    }
+}
+
+void testSelectFloatCompareFloatWithAliasing()
+{
+    testSelectFloatCompareFloat<Equal>([](float a, float b) -> bool { return a == b; });
+    testSelectFloatCompareFloat<NotEqual>([](float a, float b) -> bool { return a != b; });
+    testSelectFloatCompareFloat<LessThan>([](float a, float b) -> bool { return a < b; });
+    testSelectFloatCompareFloat<GreaterThan>([](float a, float b) -> bool { return a > b; });
+    testSelectFloatCompareFloat<LessEqual>([](float a, float b) -> bool { return a <= b; });
+    testSelectFloatCompareFloat<GreaterEqual>([](float a, float b) -> bool { return a >= b; });
+    testSelectFloatCompareFloat<EqualOrUnordered>([](float a, float b) -> bool { return a != a || b != b || a == b; });
+}
+
+void testSelectFold(intptr_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, Equal, Origin(),
+                root->appendNew(proc, Origin(), value),
+                root->appendNew(proc, Origin(), 42)),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 1, 2) == (value == 42 ? 1 : 2));
+    CHECK(invoke(*code, 642462, 32533) == (value == 42 ? 642462 : 32533));
+}
+
+void testSelectInvert()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Select, Origin(),
+            root->appendNew(
+                proc, Equal, Origin(),
+                root->appendNew(
+                    proc, NotEqual, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                    root->appendNew(proc, Origin(), 42)),
+                root->appendNew(proc, Origin(), 0)),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR2)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 42, 1, 2) == 1);
+    CHECK(invoke(*code, 42, 642462, 32533) == 642462);
+    CHECK(invoke(*code, 43, 1, 2) == 2);
+    CHECK(invoke(*code, 43, 642462, 32533) == 32533);
+}
+
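+// Check fires when its child is non-zero. With a truthy argument the Select yields -42 and the
+// Add produces 0, so execution falls through to the Return of 0; otherwise the check's generator
+// runs and returns 666 directly, which is what the invocations below assert.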
+void testCheckSelect()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, BitAnd, Origin(),
+                    root->appendNew(
+                        proc, Trunc, Origin(),
+                        root->appendNew(
+                            proc, Origin(), GPRInfo::argumentGPR0)),
+                    root->appendNew(proc, Origin(), 0xff)),
+                root->appendNew(proc, Origin(), -42),
+                root->appendNew(proc, Origin(), 35)),
+            root->appendNew(proc, Origin(), 42)));
+    unsigned generationCount = 0;
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount++;
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(generationCount == 1);
+    CHECK(invoke(*code, true) == 0);
+    CHECK(invoke(*code, false) == 666);
+}
+
+void testCheckSelectCheckSelect()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    CheckValue* check = root->appendNew(
+        proc, Check, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, BitAnd, Origin(),
+                    root->appendNew(
+                        proc, Trunc, Origin(),
+                        root->appendNew(
+                            proc, Origin(), GPRInfo::argumentGPR0)),
+                    root->appendNew(proc, Origin(), 0xff)),
+                root->appendNew(proc, Origin(), -42),
+                root->appendNew(proc, Origin(), 35)),
+            root->appendNew(proc, Origin(), 42)));
+
+    unsigned generationCount = 0;
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount++;
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    
+    CheckValue* check2 = root->appendNew(
+        proc, Check, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Select, Origin(),
+                root->appendNew(
+                    proc, BitAnd, Origin(),
+                    root->appendNew(
+                        proc, Trunc, Origin(),
+                        root->appendNew(
+                            proc, Origin(), GPRInfo::argumentGPR1)),
+                    root->appendNew(proc, Origin(), 0xff)),
+                root->appendNew(proc, Origin(), -43),
+                root->appendNew(proc, Origin(), 36)),
+            root->appendNew(proc, Origin(), 43)));
+
+    unsigned generationCount2 = 0;
+    check2->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            generationCount2++;
+            jit.move(CCallHelpers::TrustedImm32(667), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Origin(), 0));
+
+    auto code = compile(proc);
+    CHECK(generationCount == 1);
+    CHECK(generationCount2 == 1);
+    CHECK(invoke(*code, true, true) == 0);
+    CHECK(invoke(*code, false, true) == 666);
+    CHECK(invoke(*code, true, false) == 667);
+}
+
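+// Reference oracle for testPowDoubleByIntegerLoop() below: exponentiation by squaring, deferring to
+// pow() for negative or very large exponents. Matching the multiply order of the loop emitted by
+// powDoubleInt32() (rather than always calling pow()) is presumably what lets isIdentical() compare
+// the results bit-for-bit.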
+double b3Pow(double x, int y)
+{
+    if (y < 0 || y > 1000)
+        return pow(x, y);
+    double result = 1;
+    while (y) {
+        if (y & 1)
+            result *= x;
+        x *= x;
+        y >>= 1;
+    }
+    return result;
+}
+
+void testPowDoubleByIntegerLoop(double xOperand, int32_t yOperand)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* x = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    Value* y = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    auto result = powDoubleInt32(proc, root, Origin(), x, y);
+    BasicBlock* continuation = result.first;
+    continuation->appendNewControlValue(proc, Return, Origin(), result.second);
+
+    CHECK(isIdentical(compileAndRun(proc, xOperand, yOperand), b3Pow(xOperand, yOperand)));
+}
+
+void testTruncOrHigh()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(
+                proc, BitOr, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(proc, Origin(), 0x100000000))));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun(proc, value) == 0x56781234);
+}
+
+void testTruncOrLow()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(
+                proc, BitOr, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(proc, Origin(), 0x1000000))));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun(proc, value) == 0x57781234);
+}
+
+void testBitAndOrHigh()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(
+                proc, BitOr, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(proc, Origin(), 0x8)),
+            root->appendNew(proc, Origin(), 0x777777777777)));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun(proc, value) == 0x123456701234ll);
+}
+
+void testBitAndOrLow()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(
+                proc, BitOr, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(proc, Origin(), 0x1)),
+            root->appendNew(proc, Origin(), 0x777777777777)));
+
+    int64_t value = 0x123456781234;
+    CHECK(compileAndRun(proc, value) == 0x123456701235ll);
+}
+
+void testBranch64Equal(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun(proc, left, right) == (left == right));
+}
+
+void testBranch64EqualImm(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), right);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun(proc, left) == (left == right));
+}
+
+void testBranch64EqualMem(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew(
+        proc, Load, pointerType(), Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun(proc, &left, right) == (left == right));
+}
+
+void testBranch64EqualMemImm(int64_t left, int64_t right)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+
+    Value* arg1 = root->appendNew(
+        proc, Load, pointerType(), Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg2 = root->appendNew(proc, Origin(), right);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(proc, Equal, Origin(), arg1, arg2),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    bool trueResult = true;
+    thenCase->appendNewControlValue(
+        proc, Return, Origin(),
+        thenCase->appendNew(
+            proc, Load8Z, Origin(),
+            thenCase->appendNew(proc, Origin(), &trueResult)));
+
+    bool elseResult = false;
+    elseCase->appendNewControlValue(
+        proc, Return, Origin(),
+        elseCase->appendNew(
+            proc, Load8Z, Origin(),
+            elseCase->appendNew(proc, Origin(), &elseResult)));
+
+    CHECK(compileAndRun(proc, &left) == (left == right));
+}
+
+void testStore8Load8Z(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    
+    int8_t byte;
+    Value* ptr = root->appendNew(proc, Origin(), &byte);
+    
+    root->appendNew(
+        proc, Store8, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        ptr);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Load8Z, Origin(), ptr));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<uint8_t>(value));
+}
+
+void testStore16Load16Z(int32_t value)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    
+    int16_t byte;
+    Value* ptr = root->appendNew(proc, Origin(), &byte);
+    
+    root->appendNew(
+        proc, Store16, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        ptr);
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Load16Z, Origin(), ptr));
+
+    CHECK(compileAndRun<int32_t>(proc, value) == static_cast<uint16_t>(value));
+}
+
+void testSShrShl32(int32_t value, int32_t sshrAmount, int32_t shlAmount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(
+                proc, Shl, Origin(),
+                root->appendNew(
+                    proc, Trunc, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+                root->appendNew(proc, Origin(), shlAmount)),
+            root->appendNew(proc, Origin(), sshrAmount)));
+
+    CHECK(
+        compileAndRun(proc, value)
+        == ((value << (shlAmount & 31)) >> (sshrAmount & 31)));
+}
+
+void testSShrShl64(int64_t value, int32_t sshrAmount, int32_t shlAmount)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(),
+            root->appendNew(
+                proc, Shl, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(proc, Origin(), shlAmount)),
+            root->appendNew(proc, Origin(), sshrAmount)));
+
+    CHECK(
+        compileAndRun(proc, value)
+        == ((value << (shlAmount & 63)) >> (sshrAmount & 63)));
+}
+
+template<typename T>
+void testRotR(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew(proc, RotR, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun(proc, valueInt, shift), rotateRight(valueInt, shift));
+}
+
+template<typename T>
+void testRotL(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendNew(proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew(proc, RotL, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun(proc, valueInt, shift), rotateLeft(valueInt, shift));
+}
+
+template<typename T>
+void testRotRWithImmShift(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendIntConstant(proc, Origin(), Int32, shift);
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew(proc, RotR, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun(proc, valueInt, shift), rotateRight(valueInt, shift));
+}
+
+template<typename T>
+void testRotLWithImmShift(T valueInt, int32_t shift)
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* value = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    if (sizeof(T) == 4)
+        value = root->appendNew(proc, Trunc, Origin(), value);
+
+    Value* amount = root->appendIntConstant(proc, Origin(), Int32, shift);
+    root->appendNewControlValue(proc, Return, Origin(),
+        root->appendNew(proc, RotL, Origin(), value, amount));
+
+    CHECK_EQ(compileAndRun(proc, valueInt, shift), rotateLeft(valueInt, shift));
+}
+
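+// Signed division by a constant d can be lowered to a multiply by a precomputed "magic" multiplier
+// followed by shifts and a sign correction (the Hacker's Delight construction). This test just pins
+// the multiplier/shift pair that computeDivisionMagic() is expected to produce for a given divisor.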
+template<typename T>
+void testComputeDivisionMagic(T value, T magicMultiplier, unsigned shift)
+{
+    DivisionMagic<T> magic = computeDivisionMagic(value);
+    CHECK(magic.magicMultiplier == magicMultiplier);
+    CHECK(magic.shift == shift);
+}
+
+void testTrivialInfiniteLoop()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loop = proc.addBlock();
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+    loop->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(loop));
+
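+    // There is nothing to invoke here, since the generated code never returns; the test only
+    // checks that compilation of a trivial infinite loop terminates and does not crash.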
+    compile(proc);
+}
+
+void testFoldPathEqual()
+{
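+    // Branching on arg makes its value known on each path: in thenBlock arg is non-zero, so
+    // Equal(arg, 0) may be folded to 0; in elseBlock arg is zero, so it may be folded to 1. The
+    // CHECKs below hold whether or not the optimizer actually performs the fold.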
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenBlock = proc.addBlock();
+    BasicBlock* elseBlock = proc.addBlock();
+
+    Value* arg = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+
+    root->appendNewControlValue(
+        proc, Branch, Origin(), arg, FrequentedBlock(thenBlock), FrequentedBlock(elseBlock));
+
+    thenBlock->appendNewControlValue(
+        proc, Return, Origin(),
+        thenBlock->appendNew(
+            proc, Equal, Origin(), arg, thenBlock->appendNew(proc, Origin(), 0)));
+
+    elseBlock->appendNewControlValue(
+        proc, Return, Origin(),
+        elseBlock->appendNew(
+            proc, Equal, Origin(), arg, elseBlock->appendNew(proc, Origin(), 0)));
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 0) == 1);
+    CHECK(invoke(*code, 1) == 0);
+    CHECK(invoke(*code, 42) == 0);
+}
+
+void testLShiftSelf32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, Shl, Origin(), arg, arg));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int32_t value) {
+        CHECK(invoke(*code, value) == value << (value & 31));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+}
+
+void testRShiftSelf32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, SShr, Origin(), arg, arg));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int32_t value) {
+        CHECK(invoke(*code, value) == value >> (value & 31));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+}
+
+void testURShiftSelf32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(proc, ZShr, Origin(), arg, arg));
+
+    auto code = compile(proc);
+
+    auto check = [&] (uint32_t value) {
+        CHECK(invoke(*code, value) == value >> (value & 31));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+}
+
+void testLShiftSelf64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(), arg, root->appendNew(proc, Trunc, Origin(), arg)));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int64_t value) {
+        CHECK(invoke(*code, value) == value << (value & 63));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+    check(63);
+    check(64);
+}
+
+void testRShiftSelf64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, SShr, Origin(), arg, root->appendNew(proc, Trunc, Origin(), arg)));
+
+    auto code = compile(proc);
+
+    auto check = [&] (int64_t value) {
+        CHECK(invoke(*code, value) == value >> (value & 63));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+    check(63);
+    check(64);
+}
+
+void testURShiftSelf64()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    root->appendNewControlValue(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, ZShr, Origin(), arg, root->appendNew(proc, Trunc, Origin(), arg)));
+
+    auto code = compile(proc);
+
+    auto check = [&] (uint64_t value) {
+        CHECK(invoke(*code, value) == value >> (value & 63));
+    };
+
+    check(0);
+    check(1);
+    check(31);
+    check(32);
+    check(63);
+    check(64);
+}
+
+void testPatchpointDoubleRegs()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* arg = root->appendNew(proc, Origin(), FPRInfo::argumentFPR0);
+    
+    PatchpointValue* patchpoint = root->appendNew(proc, Double, Origin());
+    patchpoint->append(arg, ValueRep(FPRInfo::fpRegT0));
+    patchpoint->resultConstraint = ValueRep(FPRInfo::fpRegT0);
+
+    unsigned numCalls = 0;
+    patchpoint->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            numCalls++;
+        });
+
+    root->appendNewControlValue(proc, Return, Origin(), patchpoint);
+
+    auto code = compile(proc);
+    CHECK(numCalls == 1);
+    CHECK(invoke(*code, 42.5) == 42.5);
+}
+
+void testSpillDefSmallerThanUse()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    // Move32.
+    Value* arg32 = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* arg64 = root->appendNew(proc, ZExt32, Origin(), arg32);
+
+    // Make sure arg64 is on the stack.
+    PatchpointValue* forceSpill = root->appendNew(proc, Int64, Origin());
+    RegisterSet clobberSet = RegisterSet::allGPRs();
+    clobberSet.exclude(RegisterSet::stackRegisters());
+    clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+    clobberSet.clear(GPRInfo::returnValueGPR); // Force the return value for aliasing below.
+    forceSpill->clobberLate(clobberSet);
+    forceSpill->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.xor64(params[0].gpr(), params[0].gpr());
+        });
+
+    // On x86, Sub admits an address for any operand. If it uses the stack, the top bits must be zero.
+    Value* result = root->appendNew(proc, Sub, Origin(), forceSpill, arg64);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 0xffffffff00000000) == 0);
+}
+
+void testSpillUseLargerThanDef()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* thenCase = proc.addBlock();
+    BasicBlock* elseCase = proc.addBlock();
+    BasicBlock* tail = proc.addBlock();
+
+    RegisterSet clobberSet = RegisterSet::allGPRs();
+    clobberSet.exclude(RegisterSet::stackRegisters());
+    clobberSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+    Value* condition = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* argument = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNewControlValue(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, Trunc, Origin(),
+            condition),
+        FrequentedBlock(thenCase), FrequentedBlock(elseCase));
+
+    Value* truncated = thenCase->appendNew(proc, ZExt32, Origin(),
+        thenCase->appendNew(proc, Trunc, Origin(), argument));
+    UpsilonValue* thenResult = thenCase->appendNew(proc, Origin(), truncated);
+    thenCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    UpsilonValue* elseResult = elseCase->appendNew(proc, Origin(), argument);
+    elseCase->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(tail));
+
+    for (unsigned i = 0; i < 100; ++i) {
+        PatchpointValue* preventTailDuplication = tail->appendNew(proc, Void, Origin());
+        preventTailDuplication->clobberLate(clobberSet);
+        preventTailDuplication->setGenerator([] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    PatchpointValue* forceSpill = tail->appendNew(proc, Void, Origin());
+    forceSpill->clobberLate(clobberSet);
+    forceSpill->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            clobberSet.forEach([&] (Reg reg) {
+                jit.move(CCallHelpers::TrustedImm64(0xffffffffffffffff), reg.gpr());
+            });
+        });
+
+    Value* phi = tail->appendNew(proc, Phi, Int64, Origin());
+    thenResult->setPhi(phi);
+    elseResult->setPhi(phi);
+    tail->appendNewControlValue(proc, Return, Origin(), phi);
+
+    auto code = compile(proc);
+    CHECK(invoke(*code, 1, 0xffffffff00000000) == 0);
+    CHECK(invoke(*code, 0, 0xffffffff00000000) == 0xffffffff00000000);
+
+    // Run a second time, since leftover state from the previous run is still on the stack.
+    CHECK(invoke(*code, 1, 0xffffffff00000000) == 0);
+
+}
+
+void testLateRegister()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    // This works by making all but 1 register be input to the first patchpoint as LateRegister.
+    // The other 1 register is just a regular Register input. We assert our result is the regular
+    // register input. There would be no other way for the register allocator to arrange things
+    // because LateRegister interferes with the result.
+    // Then, the second patchpoint takes the result of the first as an argument and asks for
+    // it in a register that was a LateRegister. This is to incentivize the register allocator
+    // to use that LateRegister as the result for the first patchpoint. But of course it cannot do that.
+    // So it must issue a mov after the first patchpoint from the first's result into the second's input.
+
+    RegisterSet regs = RegisterSet::allGPRs();
+    regs.exclude(RegisterSet::stackRegisters());
+    regs.exclude(RegisterSet::reservedHardwareRegisters());
+    Vector<Value*> lateUseArgs;
+    unsigned result = 0;
+    for (GPRReg reg = CCallHelpers::firstRegister(); reg <= CCallHelpers::lastRegister(); reg = CCallHelpers::nextRegister(reg)) {
+        if (!regs.get(reg))
+            continue;
+        result++;
+        if (reg == GPRInfo::regT0)
+            continue;
+        Value* value = root->appendNew(proc, Origin(), 1);
+        lateUseArgs.append(value);
+    }
+    Value* regularUse = root->appendNew(proc, Origin(), 1);
+    PatchpointValue* firstPatchpoint = root->appendNew(proc, Int64, Origin());
+    {
+        unsigned i = 0;
+        for (GPRReg reg = CCallHelpers::firstRegister(); reg <= CCallHelpers::lastRegister(); reg = CCallHelpers::nextRegister(reg)) {
+            if (!regs.get(reg))
+                continue;
+            if (reg == GPRInfo::regT0)
+                continue;
+            Value* value = lateUseArgs[i++];
+            firstPatchpoint->append(value, ValueRep::lateReg(reg));
+        }
+        firstPatchpoint->append(regularUse, ValueRep::reg(GPRInfo::regT0));
+    }
+
+    firstPatchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params[0].gpr() == GPRInfo::regT0);
+            // Note that regT0 should also start off as 1, so we're implicitly starting our add with 1, which is also an argument.
+            unsigned skipped = 0;
+            for (unsigned i = 1; i < params.size(); i++) {
+                if (params[i].gpr() == params[0].gpr()) {
+                    skipped = i;
+                    continue;
+                }
+                jit.add64(params[i].gpr(), params[0].gpr());
+            }
+            CHECK(!!skipped);
+        });
+
+    PatchpointValue* secondPatchpoint = root->appendNew(proc, Int64, Origin());
+    secondPatchpoint->append(firstPatchpoint, ValueRep::reg(GPRInfo::regT1));
+    secondPatchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            CHECK(params[1].gpr() == GPRInfo::regT1);
+            jit.nop();
+            jit.nop();
+            jit.move(params[1].gpr(), params[0].gpr());
+            jit.nop();
+            jit.nop();
+        });
+    root->appendNewControlValue(proc, Return, Origin(), secondPatchpoint);
+    
+    auto code = compile(proc);
+    CHECK(invoke(*code) == result);
+}
+
+void interpreterPrint(Vector<intptr_t>* stream, intptr_t value)
+{
+    stream->append(value);
+}
+
+void testInterpreter()
+{
+    // This implements a silly interpreter to test building custom switch statements using
+    // Patchpoint.
+    
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* dispatch = proc.addBlock();
+    BasicBlock* addToDataPointer = proc.addBlock();
+    BasicBlock* addToCodePointer = proc.addBlock();
+    BasicBlock* addToCodePointerTaken = proc.addBlock();
+    BasicBlock* addToCodePointerNotTaken = proc.addBlock();
+    BasicBlock* addToData = proc.addBlock();
+    BasicBlock* print = proc.addBlock();
+    BasicBlock* stop = proc.addBlock();
+    
+    Variable* dataPointer = proc.addVariable(pointerType());
+    Variable* codePointer = proc.addVariable(pointerType());
+    
+    root->appendNew(
+        proc, Set, Origin(), dataPointer,
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNew(
+        proc, Set, Origin(), codePointer,
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* context = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+    root->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+    // NOTE: It's totally valid for this patchpoint to be tail-duplicated.
+    Value* codePointerValue =
+        dispatch->appendNew(proc, B3::Get, Origin(), codePointer);
+    Value* opcode = dispatch->appendNew(
+        proc, Load, pointerType(), Origin(), codePointerValue);
+    PatchpointValue* polyJump = dispatch->appendNew(proc, Void, Origin());
+    polyJump->effects = Effects();
+    polyJump->effects.terminal = true;
+    polyJump->appendSomeRegister(opcode);
+    polyJump->clobber(RegisterSet::macroScratchRegisters());
+    polyJump->numGPScratchRegisters++;
+    dispatch->appendSuccessor(FrequentedBlock(addToDataPointer));
+    dispatch->appendSuccessor(FrequentedBlock(addToCodePointer));
+    dispatch->appendSuccessor(FrequentedBlock(addToData));
+    dispatch->appendSuccessor(FrequentedBlock(print));
+    dispatch->appendSuccessor(FrequentedBlock(stop));
+    
+    // Our "opcodes".
+    static const intptr_t AddDP = 0;
+    static const intptr_t AddCP = 1;
+    static const intptr_t Add = 2;
+    static const intptr_t Print = 3;
+    static const intptr_t Stop = 4;
+    
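+    // The generator below builds a computed-goto dispatch: it reserves a jump table in a data
+    // section, fills it with the successor labels at link time, and emits an indexed indirect jump
+    // through the table using the opcode value as the index.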
+    polyJump->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+
+            MacroAssemblerCodePtr* jumpTable = bitwise_cast<MacroAssemblerCodePtr*>(
+                params.proc().addDataSection(sizeof(MacroAssemblerCodePtr) * labels.size()));
+
+            jit.move(CCallHelpers::TrustedImmPtr(jumpTable), params.gpScratch(0));
+            jit.jump(CCallHelpers::BaseIndex(params.gpScratch(0), params[0].gpr(), CCallHelpers::timesPtr()));
+            
+            jit.addLinkTask(
+                [&, jumpTable, labels] (LinkBuffer& linkBuffer) {
+                    for (unsigned i = labels.size(); i--;)
+                        jumpTable[i] = linkBuffer.locationOf(*labels[i]);
+                });
+        });
+    
+    // AddDP <operand>: adds <operand> to DP.
+    codePointerValue =
+        addToDataPointer->appendNew(proc, B3::Get, Origin(), codePointer);
+    addToDataPointer->appendNew(
+        proc, Set, Origin(), dataPointer,
+        addToDataPointer->appendNew(
+            proc, B3::Add, Origin(),
+            addToDataPointer->appendNew(proc, B3::Get, Origin(), dataPointer),
+            addToDataPointer->appendNew(
+                proc, Mul, Origin(),
+                addToDataPointer->appendNew(
+                    proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t)),
+                addToDataPointer->appendIntConstant(
+                    proc, Origin(), pointerType(), sizeof(intptr_t)))));
+    addToDataPointer->appendNew(
+        proc, Set, Origin(), codePointer,
+        addToDataPointer->appendNew(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToDataPointer->appendIntConstant(
+                proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+    addToDataPointer->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    
+    // AddCP <operand>: adds <operand> to CP if the current value at DP is non-zero, otherwise
+    // falls through normally.
+    codePointerValue =
+        addToCodePointer->appendNew(proc, B3::Get, Origin(), codePointer);
+    Value* dataPointerValue =
+        addToCodePointer->appendNew(proc, B3::Get, Origin(), dataPointer);
+    addToCodePointer->appendNewControlValue(
+        proc, Branch, Origin(),
+        addToCodePointer->appendNew(
+            proc, Load, pointerType(), Origin(), dataPointerValue),
+        FrequentedBlock(addToCodePointerTaken), FrequentedBlock(addToCodePointerNotTaken));
+    addToCodePointerTaken->appendNew(
+        proc, Set, Origin(), codePointer,
+        addToCodePointerTaken->appendNew(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToCodePointerTaken->appendNew(
+                proc, Mul, Origin(),
+                addToCodePointerTaken->appendNew(
+                    proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t)),
+                addToCodePointerTaken->appendIntConstant(
+                    proc, Origin(), pointerType(), sizeof(intptr_t)))));
+    addToCodePointerTaken->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    addToCodePointerNotTaken->appendNew(
+        proc, Set, Origin(), codePointer,
+        addToCodePointerNotTaken->appendNew(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToCodePointerNotTaken->appendIntConstant(
+                proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+    addToCodePointerNotTaken->appendNewControlValue(
+        proc, Jump, Origin(), FrequentedBlock(dispatch));
+
+    // Add <operand>: adds <operand> to the slot pointed to by DP.
+    codePointerValue = addToData->appendNew(proc, B3::Get, Origin(), codePointer);
+    dataPointerValue = addToData->appendNew(proc, B3::Get, Origin(), dataPointer);
+    addToData->appendNew(
+        proc, Store, Origin(),
+        addToData->appendNew(
+            proc, B3::Add, Origin(),
+            addToData->appendNew(
+                proc, Load, pointerType(), Origin(), dataPointerValue),
+            addToData->appendNew(
+                proc, Load, pointerType(), Origin(), codePointerValue, sizeof(intptr_t))),
+        dataPointerValue);
+    addToData->appendNew(
+        proc, Set, Origin(), codePointer,
+        addToData->appendNew(
+            proc, B3::Add, Origin(), codePointerValue,
+            addToData->appendIntConstant(proc, Origin(), pointerType(), sizeof(intptr_t) * 2)));
+    addToData->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    
+    // Print: "prints" the value pointed to by DP. What this actually means is that the value is
+    // appended to the stream vector by the interpreterPrint function.
+    codePointerValue = print->appendNew(proc, B3::Get, Origin(), codePointer);
+    dataPointerValue = print->appendNew(proc, B3::Get, Origin(), dataPointer);
+    print->appendNew(
+        proc, Void, Origin(),
+        print->appendNew(
+            proc, Origin(), bitwise_cast<void*>(interpreterPrint)),
+        context,
+        print->appendNew(proc, Load, pointerType(), Origin(), dataPointerValue));
+    print->appendNew(
+        proc, Set, Origin(), codePointer,
+        print->appendNew(
+            proc, B3::Add, Origin(), codePointerValue,
+            print->appendIntConstant(proc, Origin(), pointerType(), sizeof(intptr_t))));
+    print->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(dispatch));
+    
+    // Stop: returns.
+    stop->appendNewControlValue(
+        proc, Return, Origin(),
+        stop->appendIntConstant(proc, Origin(), pointerType(), 0));
+    
+    auto interpreter = compile(proc);
+    
+    Vector<intptr_t> data;
+    Vector<intptr_t> code;
+    Vector<intptr_t> stream;
+    
+    data.append(1);
+    data.append(0);
+    
+    if (shouldBeVerbose())
+        dataLog("data = ", listDump(data), "\n");
+    
+    // We'll write a program that prints the numbers 1..100.
+    // We expect DP to point at #0.
+    code.append(AddCP);
+    code.append(6); // go to loop body
+    
+    // Loop re-entry:
+    // We expect DP to point at #1 and for #1 to be offset by -100.
+    code.append(Add);
+    code.append(100);
+    
+    code.append(AddDP);
+    code.append(-1);
+    
+    // Loop header:
+    // We expect DP to point at #0.
+    code.append(AddDP);
+    code.append(1);
+    
+    code.append(Add);
+    code.append(1);
+    
+    code.append(Print);
+    
+    code.append(Add);
+    code.append(-100);
+    
+    // We want to stop if it's zero and continue if it's non-zero. AddCP takes the branch if it's
+    // non-zero.
+    code.append(AddCP);
+    code.append(-11); // go to loop re-entry.
+    
+    code.append(Stop);
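+    // Rough trace, assuming (as the opcode handlers above suggest) that AddCP offsets are relative
+    // to the opcode's own position: DP starts at #0, which holds 1, so the first AddCP jumps into
+    // the loop. Each pass moves DP to #1, adds 1, prints it, subtracts 100, and the trailing AddCP
+    // loops back while the value is non-zero; the stream should therefore collect 1 through 100.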
+    
+    if (shouldBeVerbose())
+        dataLog("code = ", listDump(code), "\n");
+    
+    CHECK(!invoke(*interpreter, data.data(), code.data(), &stream));
+    
+    CHECK(stream.size() == 100);
+    for (unsigned i = 0; i < 100; ++i)
+        CHECK(stream[i] == i + 1);
+    
+    if (shouldBeVerbose())
+        dataLog("stream = ", listDump(stream), "\n");
+}
+
+void testReduceStrengthCheckBottomUseInAnotherBlock()
+{
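+    // The Check in block one has a constant non-zero predicate, so reduceStrength() may treat it as
+    // always failing and replace everything after it with bottom values. The CheckAdd in block two
+    // still uses arg, which is defined after that check, so this presumably exercises how a bottom
+    // value used from another block is handled.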
+    Procedure proc;
+    
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    
+    CheckValue* check = one->appendNew(
+        proc, Check, Origin(), one->appendNew(proc, Origin(), 1));
+    check->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+
+            jit.move(CCallHelpers::TrustedImm32(666), GPRInfo::returnValueGPR);
+            jit.emitFunctionEpilogue();
+            jit.ret();
+        });
+    Value* arg = one->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    one->appendNewControlValue(proc, Jump, Origin(), FrequentedBlock(two));
+    
+    check = two->appendNew(
+        proc, CheckAdd, Origin(), arg,
+        two->appendNew(proc, Origin(), 1));
+    check->setGenerator(
+        [&] (CCallHelpers&, const StackmapGenerationParams&) {
+            CHECK(!"Should not execute");
+        });
+    two->appendNewControlValue(proc, Return, Origin(), check);
+    
+    proc.resetReachability();
+    reduceStrength(proc);
+}
+
+void testResetReachabilityDanglingReference()
+{
+    Procedure proc;
+    
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    
+    UpsilonValue* upsilon = one->appendNew(
+        proc, Origin(), one->appendNew(proc, Origin(), 42));
+    one->appendNewControlValue(proc, Oops, Origin());
+    
+    Value* phi = two->appendNew(proc, Phi, Int32, Origin());
+    upsilon->setPhi(phi);
+    two->appendNewControlValue(proc, Oops, Origin());
+    
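+    // Block two has no predecessors, so resetReachability() will delete it, leaving the Upsilon in
+    // block one referring to a Phi in a deleted block. validate() then checks that the procedure is
+    // left in a well-formed state despite that dangling reference.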
+    proc.resetReachability();
+    validate(proc);
+}
+
+void testEntrySwitchSimple()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    BasicBlock* three = proc.addBlock();
+    
+    root->appendNew(proc, EntrySwitch, Origin());
+    root->appendSuccessor(FrequentedBlock(one));
+    root->appendSuccessor(FrequentedBlock(two));
+    root->appendSuccessor(FrequentedBlock(three));
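+    // EntrySwitch gives the procedure one entrypoint per successor: entering at entrypoint N acts
+    // as if control flowed to successor N. The backend presumably resolves this by duplicating any
+    // code shared ahead of the EntrySwitch, which is trivial here because the root block contains
+    // nothing else.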
+    
+    one->appendNew(
+        proc, Return, Origin(),
+        one->appendNew(
+            proc, Add, Origin(),
+            one->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            one->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    two->appendNew(
+        proc, Return, Origin(),
+        two->appendNew(
+            proc, Sub, Origin(),
+            two->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            two->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    three->appendNew(
+        proc, Return, Origin(),
+        three->appendNew(
+            proc, Mul, Origin(),
+            three->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            three->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK(invoke(labelOne, 1, 2) == 3);
+    CHECK(invoke(labelTwo, 1, 2) == -1);
+    CHECK(invoke(labelThree, 1, 2) == 2);
+    CHECK(invoke(labelOne, -1, 2) == 1);
+    CHECK(invoke(labelTwo, -1, 2) == -3);
+    CHECK(invoke(labelThree, -1, 2) == -2);
+}
+
+void testEntrySwitchNoEntrySwitch()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1)));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK_EQ(invoke(labelOne, 1, 2), 3);
+    CHECK_EQ(invoke(labelTwo, 1, 2), 3);
+    CHECK_EQ(invoke(labelThree, 1, 2), 3);
+    CHECK_EQ(invoke(labelOne, -1, 2), 1);
+    CHECK_EQ(invoke(labelTwo, -1, 2), 1);
+    CHECK_EQ(invoke(labelThree, -1, 2), 1);
+}
+
+void testEntrySwitchWithCommonPaths()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    BasicBlock* three = proc.addBlock();
+    BasicBlock* end = proc.addBlock();
+    
+    root->appendNew(proc, EntrySwitch, Origin());
+    root->appendSuccessor(FrequentedBlock(one));
+    root->appendSuccessor(FrequentedBlock(two));
+    root->appendSuccessor(FrequentedBlock(three));
+    
+    UpsilonValue* upsilonOne = one->appendNew(
+        proc, Origin(),
+        one->appendNew(
+            proc, Add, Origin(),
+            one->appendNew(
+                proc, Trunc, Origin(),
+                one->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            one->appendNew(
+                proc, Trunc, Origin(),
+                one->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+    one->appendNew(proc, Jump, Origin());
+    one->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonTwo = two->appendNew(
+        proc, Origin(),
+        two->appendNew(
+            proc, Sub, Origin(),
+            two->appendNew(
+                proc, Trunc, Origin(),
+                two->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            two->appendNew(
+                proc, Trunc, Origin(),
+                two->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+    two->appendNew(proc, Jump, Origin());
+    two->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonThree = three->appendNew(
+        proc, Origin(),
+        three->appendNew(
+            proc, Mul, Origin(),
+            three->appendNew(
+                proc, Trunc, Origin(),
+                three->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+            three->appendNew(
+                proc, Trunc, Origin(),
+                three->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+    three->appendNew(proc, Jump, Origin());
+    three->setSuccessors(FrequentedBlock(end));
+    
+    Value* phi = end->appendNew(proc, Phi, Int32, Origin());
+    upsilonOne->setPhi(phi);
+    upsilonTwo->setPhi(phi);
+    upsilonThree->setPhi(phi);
+    
+    end->appendNew(
+        proc, Return, Origin(),
+        end->appendNew(
+            proc, chill(Mod), Origin(),
+            phi, end->appendNew(
+                proc, Trunc, Origin(),
+                end->appendNew(proc, Origin(), GPRInfo::argumentGPR2))));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK_EQ(invoke(labelOne, 1, 2, 10), 3);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 10), -1);
+    CHECK_EQ(invoke(labelThree, 1, 2, 10), 2);
+    CHECK_EQ(invoke(labelOne, -1, 2, 10), 1);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 10), -3);
+    CHECK_EQ(invoke(labelThree, -1, 2, 10), -2);
+    CHECK_EQ(invoke(labelOne, 1, 2, 2), 1);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 2), -1);
+    CHECK_EQ(invoke(labelThree, 1, 2, 2), 0);
+    CHECK_EQ(invoke(labelOne, -1, 2, 2), 1);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 2), -1);
+    CHECK_EQ(invoke(labelThree, -1, 2, 2), 0);
+    CHECK_EQ(invoke(labelOne, 1, 2, 0), 0);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 0), 0);
+    CHECK_EQ(invoke(labelThree, 1, 2, 0), 0);
+    CHECK_EQ(invoke(labelOne, -1, 2, 0), 0);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 0), 0);
+    CHECK_EQ(invoke(labelThree, -1, 2, 0), 0);
+}
+
+void testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint()
+{
+    Procedure proc;
+    proc.setNumEntrypoints(3);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* negate = proc.addBlock();
+    BasicBlock* dispatch = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    BasicBlock* three = proc.addBlock();
+    BasicBlock* end = proc.addBlock();
+
+    UpsilonValue* upsilonBase = root->appendNew(
+        proc, Origin(), root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+    root->appendNew(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR3),
+            root->appendNew(proc, Origin(), 0xff)));
+    root->setSuccessors(FrequentedBlock(negate), FrequentedBlock(dispatch));
+    
+    UpsilonValue* upsilonNegate = negate->appendNew(
+        proc, Origin(),
+        negate->appendNew(
+            proc, Neg, Origin(),
+            negate->appendNew(
+                proc, Trunc, Origin(),
+                negate->appendNew(proc, Origin(), GPRInfo::argumentGPR0))));
+    negate->appendNew(proc, Jump, Origin());
+    negate->setSuccessors(FrequentedBlock(dispatch));
+    
+    Value* arg0 = dispatch->appendNew(proc, Phi, Int32, Origin());
+    upsilonBase->setPhi(arg0);
+    upsilonNegate->setPhi(arg0);
+    dispatch->appendNew(proc, EntrySwitch, Origin());
+    dispatch->appendSuccessor(FrequentedBlock(one));
+    dispatch->appendSuccessor(FrequentedBlock(two));
+    dispatch->appendSuccessor(FrequentedBlock(three));
+    
+    UpsilonValue* upsilonOne = one->appendNew(
+        proc, Origin(),
+        one->appendNew(
+            proc, Add, Origin(),
+            arg0, one->appendNew(
+                proc, Trunc, Origin(),
+                one->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+    one->appendNew(proc, Jump, Origin());
+    one->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonTwo = two->appendNew(
+        proc, Origin(),
+        two->appendNew(
+            proc, Sub, Origin(),
+            arg0, two->appendNew(
+                proc, Trunc, Origin(),
+                two->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+    two->appendNew(proc, Jump, Origin());
+    two->setSuccessors(FrequentedBlock(end));
+    
+    UpsilonValue* upsilonThree = three->appendNew(
+        proc, Origin(),
+        three->appendNew(
+            proc, Mul, Origin(),
+            arg0, three->appendNew(
+                proc, Trunc, Origin(),
+                three->appendNew(proc, Origin(), GPRInfo::argumentGPR1))));
+    three->appendNew(proc, Jump, Origin());
+    three->setSuccessors(FrequentedBlock(end));
+    
+    Value* phi = end->appendNew(proc, Phi, Int32, Origin());
+    upsilonOne->setPhi(phi);
+    upsilonTwo->setPhi(phi);
+    upsilonThree->setPhi(phi);
+    
+    end->appendNew(
+        proc, Return, Origin(),
+        end->appendNew(
+            proc, chill(Mod), Origin(),
+            phi, end->appendNew(
+                proc, Trunc, Origin(),
+                end->appendNew(proc, Origin(), GPRInfo::argumentGPR2))));
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    CodeLocationLabel labelThree = linkBuffer.locationOf(proc.entrypointLabel(2));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+    
+    CHECK_EQ(invoke(labelOne, 1, 2, 10, false), 3);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 10, false), -1);
+    CHECK_EQ(invoke(labelThree, 1, 2, 10, false), 2);
+    CHECK_EQ(invoke(labelOne, -1, 2, 10, false), 1);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 10, false), -3);
+    CHECK_EQ(invoke(labelThree, -1, 2, 10, false), -2);
+    CHECK_EQ(invoke(labelOne, 1, 2, 10, true), 1);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 10, true), -3);
+    CHECK_EQ(invoke(labelThree, 1, 2, 10, true), -2);
+    CHECK_EQ(invoke(labelOne, -1, 2, 10, true), 3);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 10, true), -1);
+    CHECK_EQ(invoke(labelThree, -1, 2, 10, true), 2);
+    CHECK_EQ(invoke(labelOne, 1, 2, 2, false), 1);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 2, false), -1);
+    CHECK_EQ(invoke(labelThree, 1, 2, 2, false), 0);
+    CHECK_EQ(invoke(labelOne, -1, 2, 2, false), 1);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 2, false), -1);
+    CHECK_EQ(invoke(labelThree, -1, 2, 2, false), 0);
+    CHECK_EQ(invoke(labelOne, 1, 2, 0, false), 0);
+    CHECK_EQ(invoke(labelTwo, 1, 2, 0, false), 0);
+    CHECK_EQ(invoke(labelThree, 1, 2, 0, false), 0);
+    CHECK_EQ(invoke(labelOne, -1, 2, 0, false), 0);
+    CHECK_EQ(invoke(labelTwo, -1, 2, 0, false), 0);
+    CHECK_EQ(invoke(labelThree, -1, 2, 0, false), 0);
+}
+
+void testEntrySwitchLoop()
+{
+    // This is a completely absurd use of EntrySwitch, where it impacts the loop condition. This
+    // should cause duplication of nearly the entire Procedure. At the time of writing, we ended
+    // up duplicating all of it, which is fine. It's important to test this case, to make sure that
+    // the duplication algorithm can handle interesting control flow.
+    
+    Procedure proc;
+    proc.setNumEntrypoints(2);
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* loopHeader = proc.addBlock();
+    BasicBlock* loopFooter = proc.addBlock();
+    BasicBlock* end = proc.addBlock();
+
+    UpsilonValue* initialValue = root->appendNew(
+        proc, Origin(), root->appendNew(
+            proc, Trunc, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)));
+    root->appendNew(proc, Jump, Origin());
+    root->setSuccessors(loopHeader);
+    
+    Value* valueInLoop = loopHeader->appendNew(proc, Phi, Int32, Origin());
+    initialValue->setPhi(valueInLoop);
+    Value* newValue = loopHeader->appendNew(
+        proc, Add, Origin(), valueInLoop,
+        loopHeader->appendNew(proc, Origin(), 1));
+    loopHeader->appendNew(proc, EntrySwitch, Origin());
+    loopHeader->appendSuccessor(end);
+    loopHeader->appendSuccessor(loopFooter);
+    
+    loopFooter->appendNew(proc, Origin(), newValue, valueInLoop);
+    loopFooter->appendNew(
+        proc, Branch, Origin(),
+        loopFooter->appendNew(
+            proc, LessThan, Origin(), newValue,
+            loopFooter->appendNew(proc, Origin(), 100)));
+    loopFooter->setSuccessors(loopHeader, end);
+    
+    end->appendNew(proc, Return, Origin(), newValue);
+    
+    prepareForGeneration(proc);
+    
+    CCallHelpers jit(vm);
+    generate(proc, jit);
+    LinkBuffer linkBuffer(*vm, jit, nullptr);
+    CodeLocationLabel labelOne = linkBuffer.locationOf(proc.entrypointLabel(0));
+    CodeLocationLabel labelTwo = linkBuffer.locationOf(proc.entrypointLabel(1));
+    
+    MacroAssemblerCodeRef codeRef = FINALIZE_CODE(linkBuffer, ("testb3 compilation"));
+
+    CHECK(invoke(labelOne, 0) == 1);
+    CHECK(invoke(labelOne, 42) == 43);
+    CHECK(invoke(labelOne, 1000) == 1001);
+    
+    CHECK(invoke(labelTwo, 0) == 100);
+    CHECK(invoke(labelTwo, 42) == 100);
+    CHECK(invoke(labelTwo, 1000) == 1001);
+}
+
+void testSomeEarlyRegister()
+{
+    auto run = [&] (bool succeed) {
+        Procedure proc;
+        
+        BasicBlock* root = proc.addBlock();
+        
+        PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+        bool ranFirstPatchpoint = false;
+        patchpoint->setGenerator(
+            [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+                CHECK(params[0].gpr() == GPRInfo::returnValueGPR);
+                ranFirstPatchpoint = true;
+            });
+        
+        Value* arg = patchpoint;
+        
+        patchpoint = root->appendNew(proc, Int32, Origin());
+        patchpoint->appendSomeRegister(arg);
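+        // SomeEarlyRegister requests a result register that interferes with the inputs, so with the
+        // constraint the allocator must give this patchpoint a result register distinct from the one
+        // carrying arg. Without it the allocator happens to reuse the input register, which is what
+        // the else branch of the generator below expects.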
+        if (succeed)
+            patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
+        bool ranSecondPatchpoint = false;
+        patchpoint->setGenerator(
+            [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+                if (succeed)
+                    CHECK(params[0].gpr() != params[1].gpr());
+                else
+                    CHECK(params[0].gpr() == params[1].gpr());
+                ranSecondPatchpoint = true;
+            });
+        
+        root->appendNew(proc, Return, Origin(), patchpoint);
+        
+        compile(proc);
+        CHECK(ranFirstPatchpoint);
+        CHECK(ranSecondPatchpoint);
+    };
+    
+    run(true);
+    run(false);
+}
+
+void testBranchBitAndImmFusion(
+    B3::Opcode valueModifier, Type valueType, int64_t constant,
+    Air::Opcode expectedOpcode, Air::Arg::Kind firstKind)
+{
+    // Currently this test should pass on all CPUs. But some CPUs may not support this fused
+    // instruction. It's OK to skip this test on those CPUs.
+    
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* two = proc.addBlock();
+    
+    Value* left = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    
+    if (valueModifier != Identity) {
+        if (MemoryValue::accepts(valueModifier))
+            left = root->appendNew(proc, valueModifier, valueType, Origin(), left);
+        else
+            left = root->appendNew(proc, valueModifier, valueType, Origin(), left);
+    }
+    
+    root->appendNew(
+        proc, Branch, Origin(),
+        root->appendNew(
+            proc, BitAnd, Origin(), left,
+            root->appendIntConstant(proc, Origin(), valueType, constant)));
+    root->setSuccessors(FrequentedBlock(one), FrequentedBlock(two));
+    
+    one->appendNew(proc, Oops, Origin());
+    two->appendNew(proc, Oops, Origin());
+
+    lowerToAirForTesting(proc);
+
+    // The first basic block must end in a BranchTest64(resCond, tmp, bitImm).
+    Air::Inst terminal = proc.code()[0]->last();
+    CHECK_EQ(terminal.kind.opcode, expectedOpcode);
+    CHECK_EQ(terminal.args[0].kind(), Air::Arg::ResCond);
+    CHECK_EQ(terminal.args[1].kind(), firstKind);
+    CHECK(terminal.args[2].kind() == Air::Arg::BitImm || terminal.args[2].kind() == Air::Arg::BitImm64);
+}
+
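+// A terminal patchpoint both produces a value and transfers control. The successor block
+// below deliberately creates enough register pressure that the patchpoint's result must be
+// spilled, and then a later patchpoint demands it back in a register.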
+void testTerminalPatchpointThatNeedsToBeSpilled()
+{
+    // This is a unit test for how FTL's heap allocation fast paths behave.
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* success = proc.addBlock();
+    BasicBlock* slowPath = proc.addBlock();
+    
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->effects.terminal = true;
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    
+    root->appendSuccessor(success);
+    root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(42), params[0].gpr());
+            
+            CCallHelpers::Jump jumpToSuccess;
+            if (!params.fallsThroughToSuccessor(0))
+                jumpToSuccess = jit.jump();
+            
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+            
+            params.addLatePath(
+                [=] (CCallHelpers& jit) {
+                    if (jumpToSuccess.isSet())
+                        jumpToSuccess.linkTo(*labels[0], &jit);
+                });
+        });
+    
+    Vector<Value*> args;
+    {
+        RegisterSet fillAllGPRsSet = RegisterSet::allGPRs();
+        fillAllGPRsSet.exclude(RegisterSet::stackRegisters());
+        fillAllGPRsSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+        for (unsigned i = 0; i < fillAllGPRsSet.numberOfSetRegisters(); i++)
+            args.append(success->appendNew(proc, Origin(), i));
+    }
+
+    {
+        // Now force all values into every available register.
+        PatchpointValue* p = success->appendNew(proc, Void, Origin());
+        for (Value* v : args)
+            p->append(v, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    {
+        // Now require the original patchpoint to be materialized into a register.
+        PatchpointValue* p = success->appendNew(proc, Void, Origin());
+        p->append(patchpoint, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    success->appendNew(proc, Return, Origin(), success->appendNew(proc, Origin(), 10));
+    
+    slowPath->appendNew(proc, Return, Origin(), slowPath->appendNew(proc, Origin(), 20));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code), 10);
+}
+
+void testTerminalPatchpointThatNeedsToBeSpilled2()
+{
+    // This is a unit test for how FTL's heap allocation fast paths behave.
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* one = proc.addBlock();
+    BasicBlock* success = proc.addBlock();
+    BasicBlock* slowPath = proc.addBlock();
+
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+
+    root->appendNew(
+        proc, Branch, Origin(), arg);
+    root->appendSuccessor(one);
+    root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    
+    PatchpointValue* patchpoint = one->appendNew(proc, Int32, Origin());
+    patchpoint->effects.terminal = true;
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    patchpoint->append(arg, ValueRep::SomeRegister);
+    
+    one->appendSuccessor(success);
+    one->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            jit.move(CCallHelpers::TrustedImm32(666), params[0].gpr());
+            auto goToFastPath = jit.branch32(CCallHelpers::Equal, params[1].gpr(), CCallHelpers::TrustedImm32(42));
+            auto jumpToSlow = jit.jump();
+            
+            // Make sure the asserts here pass.
+            params.fallsThroughToSuccessor(0);
+            params.fallsThroughToSuccessor(1);
+
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+            
+            params.addLatePath(
+                [=] (CCallHelpers& jit) {
+                    goToFastPath.linkTo(*labels[0], &jit);
+                    jumpToSlow.linkTo(*labels[1], &jit);
+                });
+        });
+    
+    Vector<Value*> args;
+    {
+        RegisterSet fillAllGPRsSet = RegisterSet::allGPRs();
+        fillAllGPRsSet.exclude(RegisterSet::stackRegisters());
+        fillAllGPRsSet.exclude(RegisterSet::reservedHardwareRegisters());
+
+        for (unsigned i = 0; i < fillAllGPRsSet.numberOfSetRegisters(); i++)
+            args.append(success->appendNew(proc, Origin(), i));
+    }
+
+    {
+        // Now force all values into every available register.
+        PatchpointValue* p = success->appendNew(proc, Void, Origin());
+        for (Value* v : args)
+            p->append(v, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    {
+        // Now require the original patchpoint to be materialized into a register.
+        PatchpointValue* p = success->appendNew(proc, Void, Origin());
+        p->append(patchpoint, ValueRep::SomeRegister);
+        p->setGenerator([&] (CCallHelpers&, const StackmapGenerationParams&) { });
+    }
+
+    success->appendNew(proc, Return, Origin(), patchpoint);
+    
+    slowPath->appendNew(proc, Return, Origin(), arg);
+    
+    auto original1 = Options::maxB3TailDupBlockSize();
+    auto original2 = Options::maxB3TailDupBlockSuccessors();
+
+    // Tail duplication will break the critical edge we're trying to test because it
+    // will clone the slowPath block for both edges to it!
+    Options::maxB3TailDupBlockSize() = 0;
+    Options::maxB3TailDupBlockSuccessors() = 0;
+
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code, 1), 1);
+    CHECK_EQ(invoke(*code, 0), 0);
+    CHECK_EQ(invoke(*code, 42), 666);
+
+    Options::maxB3TailDupBlockSize() = original1;
+    Options::maxB3TailDupBlockSuccessors() = original2;
+}
+
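+// Runs the same shape twice, flipping which successor is marked Rare, so both the
+// fall-through and the explicit-jump arrangements of the terminal patchpoint get exercised.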
+void testPatchpointTerminalReturnValue(bool successIsRare)
+{
+    // This is a unit test for how FTL's heap allocation fast paths behave.
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* success = proc.addBlock();
+    BasicBlock* slowPath = proc.addBlock();
+    BasicBlock* continuation = proc.addBlock();
+    
+    Value* arg = root->appendNew(
+        proc, Trunc, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    
+    PatchpointValue* patchpoint = root->appendNew(proc, Int32, Origin());
+    patchpoint->effects.terminal = true;
+    patchpoint->clobber(RegisterSet::macroScratchRegisters());
+    
+    if (successIsRare) {
+        root->appendSuccessor(FrequentedBlock(success, FrequencyClass::Rare));
+        root->appendSuccessor(slowPath);
+    } else {
+        root->appendSuccessor(success);
+        root->appendSuccessor(FrequentedBlock(slowPath, FrequencyClass::Rare));
+    }
+    
+    patchpoint->appendSomeRegister(arg);
+    
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+            AllowMacroScratchRegisterUsage allowScratch(jit);
+            
+            CCallHelpers::Jump jumpToSlow =
+                jit.branch32(CCallHelpers::Above, params[1].gpr(), CCallHelpers::TrustedImm32(42));
+            
+            jit.add32(CCallHelpers::TrustedImm32(31), params[1].gpr(), params[0].gpr());
+            
+            CCallHelpers::Jump jumpToSuccess;
+            if (!params.fallsThroughToSuccessor(0))
+                jumpToSuccess = jit.jump();
+            
+            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();
+            
+            params.addLatePath(
+                [=] (CCallHelpers& jit) {
+                    jumpToSlow.linkTo(*labels[1], &jit);
+                    if (jumpToSuccess.isSet())
+                        jumpToSuccess.linkTo(*labels[0], &jit);
+                });
+        });
+    
+    UpsilonValue* successUpsilon = success->appendNew(proc, Origin(), patchpoint);
+    success->appendNew(proc, Jump, Origin());
+    success->setSuccessors(continuation);
+    
+    UpsilonValue* slowPathUpsilon = slowPath->appendNew(
+        proc, Origin(), slowPath->appendNew(proc, Origin(), 666));
+    slowPath->appendNew(proc, Jump, Origin());
+    slowPath->setSuccessors(continuation);
+    
+    Value* phi = continuation->appendNew(proc, Phi, Int32, Origin());
+    successUpsilon->setPhi(phi);
+    slowPathUpsilon->setPhi(phi);
+    continuation->appendNew(proc, Return, Origin(), phi);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code, 0), 31);
+    CHECK_EQ(invoke(*code, 1), 32);
+    CHECK_EQ(invoke(*code, 41), 72);
+    CHECK_EQ(invoke(*code, 42), 73);
+    CHECK_EQ(invoke(*code, 43), 666);
+    CHECK_EQ(invoke(*code, -1), 666);
+}
+
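+// The fence tests check both that the expected barrier appears in the generated code and
+// that cheaper forms are preferred, e.g. "lock or" rather than "mfence" on x86, and
+// "dmb ishst" instead of a full "dmb ish" for store-only fences on ARM64.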
+void testMemoryFence()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew(proc, Origin());
+    root->appendNew(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code), 42);
+    if (isX86())
+        checkUsesInstruction(*code, "lock or $0x0, (%rsp)");
+    if (isARM64())
+        checkUsesInstruction(*code, "dmb    ish");
+    checkDoesNotUseInstruction(*code, "mfence");
+    checkDoesNotUseInstruction(*code, "dmb    ishst");
+}
+
+void testStoreFence()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew(proc, Origin(), HeapRange::top(), HeapRange());
+    root->appendNew(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code), 42);
+    checkDoesNotUseInstruction(*code, "lock");
+    checkDoesNotUseInstruction(*code, "mfence");
+    if (isARM64())
+        checkUsesInstruction(*code, "dmb    ishst");
+}
+
+void testLoadFence()
+{
+    Procedure proc;
+    
+    BasicBlock* root = proc.addBlock();
+    
+    root->appendNew(proc, Origin(), HeapRange(), HeapRange::top());
+    root->appendNew(proc, Return, Origin(), root->appendIntConstant(proc, Origin(), Int32, 42));
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code), 42);
+    checkDoesNotUseInstruction(*code, "lock");
+    checkDoesNotUseInstruction(*code, "mfence");
+    if (isARM64())
+        checkUsesInstruction(*code, "dmb    ish");
+    checkDoesNotUseInstruction(*code, "dmb    ishst");
+}
+
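+// Trapping memory operations keep exitsSideways/controlDependent effects and read top,
+// so they must not be dead-code-eliminated or merged away; the tests below count the Air
+// instructions marked as trapping to confirm they survive compilation.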
+void testTrappingLoad()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    MemoryValue* value = root->appendNew(
+        proc, trapping(Load), Int32, Origin(),
+        root->appendNew(proc, Origin(), &x));
+    Effects expectedEffects;
+    expectedEffects.exitsSideways = true;
+    expectedEffects.controlDependent = true;
+    expectedEffects.reads = HeapRange::top();
+    CHECK_EQ(value->range(), HeapRange::top());
+    CHECK_EQ(value->effects(), expectedEffects);
+    value->setRange(HeapRange(0));
+    CHECK_EQ(value->range(), HeapRange(0));
+    CHECK_EQ(value->effects(), expectedEffects); // We still read top!
+    root->appendNew(proc, Return, Origin(), value);
+    CHECK_EQ(compileAndRun(proc), 42);
+    unsigned trapsCount = 0;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                trapsCount++;
+        }
+    }
+    CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingStore()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    MemoryValue* value = root->appendNew(
+        proc, trapping(Store), Origin(),
+        root->appendNew(proc, Origin(), 111),
+        root->appendNew(proc, Origin(), &x));
+    Effects expectedEffects;
+    expectedEffects.exitsSideways = true;
+    expectedEffects.controlDependent = true;
+    expectedEffects.reads = HeapRange::top();
+    expectedEffects.writes = HeapRange::top();
+    CHECK_EQ(value->range(), HeapRange::top());
+    CHECK_EQ(value->effects(), expectedEffects);
+    value->setRange(HeapRange(0));
+    CHECK_EQ(value->range(), HeapRange(0));
+    expectedEffects.writes = HeapRange(0);
+    CHECK_EQ(value->effects(), expectedEffects); // We still read top!
+    root->appendNew(proc, Return, Origin());
+    compileAndRun(proc);
+    CHECK_EQ(x, 111);
+    unsigned trapsCount = 0;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                trapsCount++;
+        }
+    }
+    CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingLoadAddStore()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    ConstPtrValue* ptr = root->appendNew(proc, Origin(), &x);
+    root->appendNew(
+        proc, trapping(Store), Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, trapping(Load), Int32, Origin(), ptr),
+            root->appendNew(proc, Origin(), 3)),
+        ptr);
+    root->appendNew(proc, Return, Origin());
+    compileAndRun(proc);
+    CHECK_EQ(x, 45);
+    bool traps = false;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                traps = true;
+        }
+    }
+    CHECK(traps);
+}
+
+void testTrappingLoadDCE()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    root->appendNew(
+        proc, trapping(Load), Int32, Origin(),
+        root->appendNew(proc, Origin(), &x));
+    root->appendNew(proc, Return, Origin());
+    compileAndRun(proc);
+    unsigned trapsCount = 0;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.traps)
+                trapsCount++;
+        }
+    }
+    CHECK_EQ(trapsCount, 1u);
+}
+
+void testTrappingStoreElimination()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    int x = 42;
+    Value* ptr = root->appendNew(proc, Origin(), &x);
+    root->appendNew(
+        proc, trapping(Store), Origin(),
+        root->appendNew(proc, Origin(), 43),
+        ptr);
+    root->appendNew(
+        proc, trapping(Store), Origin(),
+        root->appendNew(proc, Origin(), 44),
+        ptr);
+    root->appendNew(proc, Return, Origin());
+    compileAndRun(proc);
+    unsigned storeCount = 0;
+    for (Value* value : proc.values()) {
+        if (MemoryValue::isStore(value->opcode()))
+            storeCount++;
+    }
+    CHECK_EQ(storeCount, 2u);
+}
+
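+// moveConstants should materialize one big constant and derive the related ones from it
+// (for example by adding a small offset), so after the phase exactly one used value is
+// still expected to carry a 64-bit constant.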
+void testMoveConstants()
+{
+    auto check = [] (Procedure& proc) {
+        proc.resetReachability();
+        
+        if (shouldBeVerbose()) {
+            dataLog("IR before:\n");
+            dataLog(proc);
+        }
+        
+        moveConstants(proc);
+        
+        if (shouldBeVerbose()) {
+            dataLog("IR after:\n");
+            dataLog(proc);
+        }
+        
+        UseCounts useCounts(proc);
+        unsigned count = 0;
+        for (Value* value : proc.values()) {
+            if (useCounts.numUses(value) && value->hasInt64())
+                count++;
+        }
+        
+        if (count == 1)
+            return;
+        
+        crashLock.lock();
+        dataLog("Fail in testMoveConstants: got more than one Const64:\n");
+        dataLog(proc);
+        CRASH();
+    };
+
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* a = root->appendNew(
+            proc, Load, pointerType(), Origin(), 
+            root->appendNew(proc, Origin(), 0x123412341234));
+        Value* b = root->appendNew(
+            proc, Load, pointerType(), Origin(),
+            root->appendNew(proc, Origin(), 0x123412341334));
+        root->appendNew(proc, Void, Origin(), a, b);
+        root->appendNew(proc, Return, Origin());
+        check(proc);
+    }
+    
+    {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+        Value* x = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* a = root->appendNew(
+            proc, Add, Origin(), x, root->appendNew(proc, Origin(), 0x123412341234));
+        Value* b = root->appendNew(
+            proc, Add, Origin(), x, root->appendNew(proc, Origin(), -0x123412341234));
+        root->appendNew(proc, Void, Origin(), a, b);
+        root->appendNew(proc, Return, Origin());
+        check(proc);
+    }
+}
+
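+// The PC-to-Origin map must not pad the instruction stream: a watchpoint label taken in
+// one patchpoint has to compare equal to the label taken at the start of the next one.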
+void testPCOriginMapDoesntInsertNops()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    CCallHelpers::Label watchpointLabel;
+
+    PatchpointValue* patchpoint = root->appendNew(proc, Void, Origin());
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            watchpointLabel = jit.watchpointLabel();
+        });
+
+    patchpoint = root->appendNew(proc, Void, Origin());
+    patchpoint->setGenerator(
+        [&] (CCallHelpers& jit, const StackmapGenerationParams&) {
+            CCallHelpers::Label labelIgnoringWatchpoints = jit.labelIgnoringWatchpoints();
+
+            CHECK(watchpointLabel == labelIgnoringWatchpoints);
+        });
+
+    root->appendNew(proc, Return, Origin());
+
+    compile(proc);
+}
+
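+// Pinning a register removes it from the allocator's pool. With all callee-save registers
+// pinned, neither the generated code nor the prologue should touch them; without pinning,
+// this much register pressure is expected to use at least one callee-save register.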
+void testPinRegisters()
+{
+    auto go = [&] (bool pin) {
+        Procedure proc;
+        RegisterSet csrs;
+        csrs.merge(RegisterSet::calleeSaveRegisters());
+        csrs.exclude(RegisterSet::stackRegisters());
+        if (pin) {
+            csrs.forEach(
+                [&] (Reg reg) {
+                    proc.pinRegister(reg);
+                });
+        }
+        BasicBlock* root = proc.addBlock();
+        Value* a = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* b = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* c = root->appendNew(proc, Origin(), GPRInfo::argumentGPR2);
+        Value* d = root->appendNew(proc, Origin(), GPRInfo::regCS0);
+        root->appendNew(
+            proc, Void, Origin(),
+            root->appendNew(proc, Origin(), static_cast(0x1234)));
+        root->appendNew(
+            proc, Void, Origin(),
+            root->appendNew(proc, Origin(), static_cast(0x1235)),
+            a, b, c);
+        PatchpointValue* patchpoint = root->appendNew(proc, Void, Origin());
+        patchpoint->appendSomeRegister(d);
+        patchpoint->setGenerator(
+            [&] (CCallHelpers&, const StackmapGenerationParams& params) {
+                CHECK_EQ(params[0].gpr(), GPRInfo::regCS0);
+            });
+        root->appendNew(proc, Return, Origin());
+        auto code = compile(proc);
+        bool usesCSRs = false;
+        for (Air::BasicBlock* block : proc.code()) {
+            for (Air::Inst& inst : *block) {
+                if (inst.kind.opcode == Air::Patch && inst.origin == patchpoint)
+                    continue;
+                inst.forEachTmpFast(
+                    [&] (Air::Tmp tmp) {
+                        if (tmp.isReg())
+                            usesCSRs |= csrs.get(tmp.reg());
+                    });
+            }
+        }
+        for (const RegisterAtOffset& regAtOffset : proc.calleeSaveRegisters())
+            usesCSRs |= csrs.get(regAtOffset.reg());
+        CHECK_EQ(usesCSRs, !pin);
+    };
+    
+    go(true);
+    go(false);
+}
+
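+// The testX86Lea* tests check that Add/Shl trees involving an immediate are selected as a
+// single x86 lea with the matching base, index, scale and displacement.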
+void testX86LeaAddAddShlLeft()
+{
+    // Add(Add(Shl(@x, $c), @y), $d)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(
+                proc, Shl, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                root->appendNew(proc, Origin(), 2)),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), 100));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea 0x64(%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke(*code, 1, 2), (1 + (2 << 2)) + 100);
+}
+
+void testX86LeaAddAddShlRight()
+{
+    // Add(Add(@x, Shl(@y, $c)), $d)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+            root->appendNew(
+                proc, Shl, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                root->appendNew(proc, Origin(), 2))),
+        root->appendNew(proc, Origin(), 100));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea 0x64(%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke(*code, 1, 2), (1 + (2 << 2)) + 100);
+}
+
+void testX86LeaAddAdd()
+{
+    // Add(Add(@x, @y), $c)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Add, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR0)),
+        root->appendNew(proc, Origin(), 100));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkDisassembly(
+        *code,
+        [&] (const char* disassembly) -> bool {
+            return strstr(disassembly, "lea 0x64(%rdi,%rsi), %rax")
+                || strstr(disassembly, "lea 0x64(%rsi,%rdi), %rax");
+        },
+        "Expected to find something like lea 0x64(%rdi,%rsi), %rax but didn't!");
+    CHECK_EQ(invoke(*code, 1, 2), (1 + 2) + 100);
+}
+
+void testX86LeaAddShlRight()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 2)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke(*code, 1, 2), 1 + (2 << 2));
+}
+
+void testX86LeaAddShlLeftScale1()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 0)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkDisassembly(
+        *code,
+        [&] (const char* disassembly) -> bool {
+            return strstr(disassembly, "lea (%rdi,%rsi), %rax")
+                || strstr(disassembly, "lea (%rsi,%rdi), %rax");
+        },
+        "Expected to find something like lea (%rdi,%rsi), %rax but didn't!");
+    CHECK_EQ(invoke(*code, 1, 2), 1 + 2);
+}
+
+void testX86LeaAddShlLeftScale2()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 1)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,2), %rax");
+    CHECK_EQ(invoke(*code, 1, 2), 1 + (2 << 1));
+}
+
+void testX86LeaAddShlLeftScale4()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 2)),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,4), %rax");
+    CHECK_EQ(invoke(*code, 1, 2), 1 + (2 << 2));
+}
+
+void testX86LeaAddShlLeftScale8()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 3)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    checkUsesInstruction(*code, "lea (%rdi,%rsi,8), %rax");
+    CHECK_EQ(invoke(*code, 1, 2), 1 + (2 << 3));
+}
+
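+// Shift amounts are taken modulo the operand width, so shifting a 64-bit value by 64 or 65
+// behaves like shifting by 0 or 1; these tests pin that behavior down.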
+void testAddShl32()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 32)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code, 1, 2), 1 + (static_cast(2) << static_cast(32)));
+}
+
+void testAddShl64()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 64)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code, 1, 2), 1 + 2);
+}
+
+void testAddShl65()
+{
+    // Add(Shl(@x, $c), @y)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* result = root->appendNew(
+        proc, Add, Origin(),
+        root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+        root->appendNew(
+            proc, Shl, Origin(),
+            root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+            root->appendNew(proc, Origin(), 65)));
+    root->appendNew(proc, Return, Origin(), result);
+    
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code, 1, 2), 1 + (2 << 1));
+}
+
+void testReduceStrengthReassociation(bool flip)
+{
+    // Add(Add(@x, $c), @y) -> Add(Add(@x, @y), $c)
+    // and
+    // Add(@y, Add(@x, $c)) -> Add(Add(@x, @y), $c)
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    Value* arg1 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* arg2 = root->appendNew(proc, Origin(), GPRInfo::argumentGPR1);
+    
+    Value* innerAdd = root->appendNew(
+        proc, Add, Origin(), arg1,
+        root->appendNew(proc, Origin(), 42));
+    
+    Value* outerAdd;
+    if (flip)
+        outerAdd = root->appendNew(proc, Add, Origin(), arg2, innerAdd);
+    else
+        outerAdd = root->appendNew(proc, Add, Origin(), innerAdd, arg2);
+    
+    root->appendNew(proc, Return, Origin(), outerAdd);
+    
+    proc.resetReachability();
+
+    if (shouldBeVerbose()) {
+        dataLog("IR before reduceStrength:\n");
+        dataLog(proc);
+    }
+    
+    reduceStrength(proc);
+    
+    if (shouldBeVerbose()) {
+        dataLog("IR after reduceStrength:\n");
+        dataLog(proc);
+    }
+    
+    CHECK_EQ(root->last()->opcode(), Return);
+    CHECK_EQ(root->last()->child(0)->opcode(), Add);
+    CHECK(root->last()->child(0)->child(1)->isIntPtr(42));
+    CHECK_EQ(root->last()->child(0)->child(0)->opcode(), Add);
+    CHECK(
+        (root->last()->child(0)->child(0)->child(0) == arg1 && root->last()->child(0)->child(0)->child(1) == arg2) ||
+        (root->last()->child(0)->child(0)->child(0) == arg2 && root->last()->child(0)->child(0)->child(1) == arg1));
+}
+
+void testLoadBaseIndexShift2()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNew(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Load, Int32, Origin(),
+            root->appendNew(
+                proc, Add, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(
+                    proc, Shl, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                    root->appendNew(proc, Origin(), 2)))));
+    auto code = compile(proc);
+    if (isX86())
+        checkUsesInstruction(*code, "(%rdi,%rsi,4)");
+    int32_t value = 12341234;
+    char* ptr = bitwise_cast(&value);
+    for (unsigned i = 0; i < 10; ++i)
+        CHECK_EQ(invoke(*code, ptr - (static_cast(1) << static_cast(2)) * i, i), 12341234);
+}
+
+void testLoadBaseIndexShift32()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNew(
+        proc, Return, Origin(),
+        root->appendNew(
+            proc, Load, Int32, Origin(),
+            root->appendNew(
+                proc, Add, Origin(),
+                root->appendNew(proc, Origin(), GPRInfo::argumentGPR0),
+                root->appendNew(
+                    proc, Shl, Origin(),
+                    root->appendNew(proc, Origin(), GPRInfo::argumentGPR1),
+                    root->appendNew(proc, Origin(), 32)))));
+    auto code = compile(proc);
+    int32_t value = 12341234;
+    char* ptr = bitwise_cast(&value);
+    for (unsigned i = 0; i < 10; ++i)
+        CHECK_EQ(invoke(*code, ptr - (static_cast(1) << static_cast(32)) * i, i), 12341234);
+}
+
+void testOptimizeMaterialization()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+    root->appendNew(
+        proc, Void, Origin(),
+        root->appendNew(proc, Origin(), 0x123423453456llu),
+        root->appendNew(proc, Origin(), 0x123423453456llu + 35));
+    root->appendNew(proc, Return, Origin());
+    
+    auto code = compile(proc);
+    bool found = false;
+    for (Air::BasicBlock* block : proc.code()) {
+        for (Air::Inst& inst : *block) {
+            if (inst.kind.opcode != Air::Add64)
+                continue;
+            if (inst.args[0] != Air::Arg::imm(35))
+                continue;
+            found = true;
+        }
+    }
+    CHECK(found);
+}
+
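+// WasmBoundsCheck compares the pointer against the bound held in the pinned register,
+// taking the constant offset into account. On failure it runs the procedure's bounds check
+// generator, which here returns 42 instead of the normal result 0x42.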
+void testWasmBoundsCheck(unsigned offset)
+{
+    Procedure proc;
+    GPRReg pinned = GPRInfo::argumentGPR1;
+    proc.pinRegister(pinned);
+
+    proc.setWasmBoundsCheckGenerator([=] (CCallHelpers& jit, GPRReg pinnedGPR, unsigned actualOffset) {
+        CHECK_EQ(pinnedGPR, pinned);
+        CHECK_EQ(actualOffset, offset);
+
+        // This should always work because a function this simple should never have callee
+        // saves.
+        jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
+        jit.emitFunctionEpilogue();
+        jit.ret();
+    });
+
+    BasicBlock* root = proc.addBlock();
+    Value* left = root->appendNew(proc, Origin(), GPRInfo::argumentGPR0);
+    if (pointerType() != Int32)
+        left = root->appendNew(proc, Trunc, Origin(), left);
+    root->appendNew(proc, Origin(), left, pinned, offset);
+    Value* result = root->appendNew(proc, Origin(), 0x42);
+    root->appendNewControlValue(proc, Return, Origin(), result);
+
+    auto code = compile(proc);
+    CHECK_EQ(invoke(*code, 1, 2 + offset), 0x42);
+    CHECK_EQ(invoke(*code, 3, 2 + offset), 42);
+    CHECK_EQ(invoke(*code, 2, 2 + offset), 42);
+}
+
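+// WasmAddress adds the pinned base register (here holding values.data()) to a 32-bit
+// index. The loop stores numToStore through such addresses into every slot of the buffer,
+// and the checks afterwards verify that all the stores landed.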
+void testWasmAddress()
+{
+    Procedure proc;
+    GPRReg pinnedGPR = GPRInfo::argumentGPR2;
+    proc.pinRegister(pinnedGPR);
+
+    unsigned loopCount = 100;
+    Vector<unsigned> values(loopCount);
+    unsigned numToStore = 42;
+
+    BasicBlock* root = proc.addBlock();
+    BasicBlock* header = proc.addBlock();
+    BasicBlock* body = proc.addBlock();
+    BasicBlock* continuation = proc.addBlock();
+
+    // Root
+    Value* loopCountValue = root->appendNew(proc, Trunc, Origin(), root->appendNew(proc, Origin(), GPRInfo::argumentGPR0));
+    Value* valueToStore = root->appendNew(proc, Trunc, Origin(), root->appendNew(proc, Origin(), GPRInfo::argumentGPR1));
+    UpsilonValue* beginUpsilon = root->appendNew(proc, Origin(), root->appendNew(proc, Origin(), 0));
+    root->appendNewControlValue(proc, Jump, Origin(), header);
+
+    // Header
+    Value* indexPhi = header->appendNew(proc, Phi, Int32, Origin());
+    header->appendNewControlValue(proc, Branch, Origin(),
+        header->appendNew(proc, Below, Origin(), indexPhi, loopCountValue),
+        body, continuation);
+
+    // Body
+    Value* pointer = body->appendNew(proc, Mul, Origin(), indexPhi,
+        body->appendNew(proc, Origin(), sizeof(unsigned)));
+    pointer = body->appendNew(proc, ZExt32, Origin(), pointer);
+    body->appendNew(proc, Store, Origin(), valueToStore,
+        body->appendNew(proc, Origin(), pointer, pinnedGPR));
+    UpsilonValue* incUpsilon = body->appendNew(proc, Origin(),
+        body->appendNew(proc, Add, Origin(), indexPhi,
+            body->appendNew(proc, Origin(), 1)));
+    body->appendNewControlValue(proc, Jump, Origin(), header);
+
+    // Continuation
+    continuation->appendNewControlValue(proc, Return, Origin());
+
+    beginUpsilon->setPhi(indexPhi);
+    incUpsilon->setPhi(indexPhi);
+
+
+    auto code = compile(proc);
+    invoke(*code, loopCount, numToStore, values.data());
+    for (unsigned value : values)
+        CHECK_EQ(numToStore, value);
+}
+
+// Make sure the compiler does not try to optimize anything out.
+NEVER_INLINE double zero()
+{
+    return 0.;
+}
+
+double negativeZero()
+{
+    return -zero();
+}
+
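+// Each RUN* macro wraps a test invocation in a shared task; tests are selected by a
+// case-insensitive substring match of the filter against the test's printed name.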
+#define RUN(test) do {                          \
+        if (!shouldRun(#test))                  \
+            break;                              \
+        tasks.append(                           \
+            createSharedTask<void()>(       \
+                [&] () {                        \
+                    dataLog(#test "...\n");     \
+                    test;                       \
+                    dataLog(#test ": OK!\n");   \
+                }));                            \
+    } while (false);
+
+#define RUN_UNARY(test, values) \
+    for (auto a : values) {                             \
+        CString testStr = toCString(#test, "(", a.name, ")"); \
+        if (!shouldRun(testStr.data()))                 \
+            continue;                                   \
+        tasks.append(createSharedTask<void()>(  \
+            [=] () {                                    \
+                dataLog(toCString(testStr, "...\n"));   \
+                test(a.value);                          \
+                dataLog(toCString(testStr, ": OK!\n")); \
+            }));                                        \
+    }
+
+#define RUN_BINARY(test, valuesA, valuesB) \
+    for (auto a : valuesA) {                                \
+        for (auto b : valuesB) {                            \
+            CString testStr = toCString(#test, "(", a.name, ", ", b.name, ")"); \
+            if (!shouldRun(testStr.data()))                 \
+                continue;                                   \
+            tasks.append(createSharedTask<void()>(  \
+                [=] () {                                    \
+                    dataLog(toCString(testStr, "...\n"));   \
+                    test(a.value, b.value);                 \
+                    dataLog(toCString(testStr, ": OK!\n")); \
+                }));                                        \
+        }                                                   \
+    }
+
+void run(const char* filter)
+{
+    JSC::initializeThreading();
+    vm = &VM::create(LargeHeap).leakRef();
+
+    Deque<RefPtr<SharedTask<void()>>> tasks;
+
+    auto shouldRun = [&] (const char* testName) -> bool {
+        return !filter || !!strcasestr(testName, filter);
+    };
+
+    // We run this test first because it fiddles with some
+    // JSC options.
+    testTerminalPatchpointThatNeedsToBeSpilled2();
+
+    RUN(test42());
+    RUN(testLoad42());
+    RUN(testLoadOffsetImm9Max());
+    RUN(testLoadOffsetImm9MaxPlusOne());
+    RUN(testLoadOffsetImm9MaxPlusTwo());
+    RUN(testLoadOffsetImm9Min());
+    RUN(testLoadOffsetImm9MinMinusOne());
+    RUN(testLoadOffsetScaledUnsignedImm12Max());
+    RUN(testLoadOffsetScaledUnsignedOverImm12Max());
+    RUN(testArg(43));
+    RUN(testReturnConst64(5));
+    RUN(testReturnConst64(-42));
+    RUN(testReturnVoid());
+
+    RUN(testAddArg(111));
+    RUN(testAddArgs(1, 1));
+    RUN(testAddArgs(1, 2));
+    RUN(testAddArgImm(1, 2));
+    RUN(testAddArgImm(0, 2));
+    RUN(testAddArgImm(1, 0));
+    RUN(testAddImmArg(1, 2));
+    RUN(testAddImmArg(0, 2));
+    RUN(testAddImmArg(1, 0));
+    RUN_BINARY(testAddArgMem, int64Operands(), int64Operands());
+    RUN_BINARY(testAddMemArg, int64Operands(), int64Operands());
+    RUN_BINARY(testAddImmMem, int64Operands(), int64Operands());
+    RUN_UNARY(testAddArg32, int32Operands());
+    RUN(testAddArgs32(1, 1));
+    RUN(testAddArgs32(1, 2));
+    RUN_BINARY(testAddArgMem32, int32Operands(), int32Operands());
+    RUN_BINARY(testAddMemArg32, int32Operands(), int32Operands());
+    RUN_BINARY(testAddImmMem32, int32Operands(), int32Operands());
+    RUN(testAddArgZeroImmZDef());
+    RUN(testAddLoadTwice());
+
+    RUN(testAddArgDouble(M_PI));
+    RUN(testAddArgsDouble(M_PI, 1));
+    RUN(testAddArgsDouble(M_PI, -M_PI));
+    RUN(testAddArgImmDouble(M_PI, 1));
+    RUN(testAddArgImmDouble(M_PI, 0));
+    RUN(testAddArgImmDouble(M_PI, negativeZero()));
+    RUN(testAddArgImmDouble(0, 0));
+    RUN(testAddArgImmDouble(0, negativeZero()));
+    RUN(testAddArgImmDouble(negativeZero(), 0));
+    RUN(testAddArgImmDouble(negativeZero(), negativeZero()));
+    RUN(testAddImmArgDouble(M_PI, 1));
+    RUN(testAddImmArgDouble(M_PI, 0));
+    RUN(testAddImmArgDouble(M_PI, negativeZero()));
+    RUN(testAddImmArgDouble(0, 0));
+    RUN(testAddImmArgDouble(0, negativeZero()));
+    RUN(testAddImmArgDouble(negativeZero(), 0));
+    RUN(testAddImmArgDouble(negativeZero(), negativeZero()));
+    RUN(testAddImmsDouble(M_PI, 1));
+    RUN(testAddImmsDouble(M_PI, 0));
+    RUN(testAddImmsDouble(M_PI, negativeZero()));
+    RUN(testAddImmsDouble(0, 0));
+    RUN(testAddImmsDouble(0, negativeZero()));
+    RUN(testAddImmsDouble(negativeZero(), negativeZero()));
+    RUN_UNARY(testAddArgFloat, floatingPointOperands());
+    RUN_BINARY(testAddArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testAddFPRArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testAddArgImmFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testAddImmArgFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testAddImmsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testAddArgFloatWithUselessDoubleConversion, floatingPointOperands());
+    RUN_BINARY(testAddArgsFloatWithUselessDoubleConversion, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testAddArgsFloatWithEffectfulDoubleConversion, floatingPointOperands(), floatingPointOperands());
+
+    RUN(testMulArg(5));
+    RUN(testMulAddArg(5));
+    RUN(testMulAddArg(85));
+    RUN(testMulArgStore(5));
+    RUN(testMulArgStore(85));
+    RUN(testMulArgs(1, 1));
+    RUN(testMulArgs(1, 2));
+    RUN(testMulArgs(3, 3));
+    RUN(testMulArgImm(1, 2));
+    RUN(testMulArgImm(1, 4));
+    RUN(testMulArgImm(1, 8));
+    RUN(testMulArgImm(1, 16));
+    RUN(testMulArgImm(1, 0x80000000llu));
+    RUN(testMulArgImm(1, 0x800000000000llu));
+    RUN(testMulArgImm(7, 2));
+    RUN(testMulArgImm(7, 4));
+    RUN(testMulArgImm(7, 8));
+    RUN(testMulArgImm(7, 16));
+    RUN(testMulArgImm(7, 0x80000000llu));
+    RUN(testMulArgImm(7, 0x800000000000llu));
+    RUN(testMulArgImm(-42, 2));
+    RUN(testMulArgImm(-42, 4));
+    RUN(testMulArgImm(-42, 8));
+    RUN(testMulArgImm(-42, 16));
+    RUN(testMulArgImm(-42, 0x80000000llu));
+    RUN(testMulArgImm(-42, 0x800000000000llu));
+    RUN(testMulArgImm(0, 2));
+    RUN(testMulArgImm(1, 0));
+    RUN(testMulArgImm(3, 3));
+    RUN(testMulArgImm(3, -1));
+    RUN(testMulArgImm(-3, -1));
+    RUN(testMulArgImm(0, -1));
+    RUN(testMulImmArg(1, 2));
+    RUN(testMulImmArg(0, 2));
+    RUN(testMulImmArg(1, 0));
+    RUN(testMulImmArg(3, 3));
+    RUN(testMulArgs32(1, 1));
+    RUN(testMulArgs32(1, 2));
+    RUN(testMulLoadTwice());
+    RUN(testMulAddArgsLeft());
+    RUN(testMulAddArgsRight());
+    RUN(testMulAddArgsLeft32());
+    RUN(testMulAddArgsRight32());
+    RUN(testMulSubArgsLeft());
+    RUN(testMulSubArgsRight());
+    RUN(testMulSubArgsLeft32());
+    RUN(testMulSubArgsRight32());
+    RUN(testMulNegArgs());
+    RUN(testMulNegArgs32());
+
+    RUN_UNARY(testMulArgDouble, floatingPointOperands());
+    RUN_BINARY(testMulArgsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulArgImmDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulImmArgDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulImmsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testMulArgFloat, floatingPointOperands());
+    RUN_BINARY(testMulArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulArgImmFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulImmArgFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulImmsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testMulArgFloatWithUselessDoubleConversion, floatingPointOperands());
+    RUN_BINARY(testMulArgsFloatWithUselessDoubleConversion, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testMulArgsFloatWithEffectfulDoubleConversion, floatingPointOperands(), floatingPointOperands());
+
+    RUN(testDivArgDouble(M_PI));
+    RUN(testDivArgsDouble(M_PI, 1));
+    RUN(testDivArgsDouble(M_PI, -M_PI));
+    RUN(testDivArgImmDouble(M_PI, 1));
+    RUN(testDivArgImmDouble(M_PI, 0));
+    RUN(testDivArgImmDouble(M_PI, negativeZero()));
+    RUN(testDivArgImmDouble(0, 0));
+    RUN(testDivArgImmDouble(0, negativeZero()));
+    RUN(testDivArgImmDouble(negativeZero(), 0));
+    RUN(testDivArgImmDouble(negativeZero(), negativeZero()));
+    RUN(testDivImmArgDouble(M_PI, 1));
+    RUN(testDivImmArgDouble(M_PI, 0));
+    RUN(testDivImmArgDouble(M_PI, negativeZero()));
+    RUN(testDivImmArgDouble(0, 0));
+    RUN(testDivImmArgDouble(0, negativeZero()));
+    RUN(testDivImmArgDouble(negativeZero(), 0));
+    RUN(testDivImmArgDouble(negativeZero(), negativeZero()));
+    RUN(testDivImmsDouble(M_PI, 1));
+    RUN(testDivImmsDouble(M_PI, 0));
+    RUN(testDivImmsDouble(M_PI, negativeZero()));
+    RUN(testDivImmsDouble(0, 0));
+    RUN(testDivImmsDouble(0, negativeZero()));
+    RUN(testDivImmsDouble(negativeZero(), negativeZero()));
+    RUN_UNARY(testDivArgFloat, floatingPointOperands());
+    RUN_BINARY(testDivArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testDivArgImmFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testDivImmArgFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testDivImmsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testDivArgFloatWithUselessDoubleConversion, floatingPointOperands());
+    RUN_BINARY(testDivArgsFloatWithUselessDoubleConversion, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testDivArgsFloatWithEffectfulDoubleConversion, floatingPointOperands(), floatingPointOperands());
+
+    RUN_BINARY(testUDivArgsInt32, int32Operands(), int32Operands());
+    RUN_BINARY(testUDivArgsInt64, int64Operands(), int64Operands());
+
+    RUN_UNARY(testModArgDouble, floatingPointOperands());
+    RUN_BINARY(testModArgsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testModArgImmDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testModImmArgDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testModImmsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testModArgFloat, floatingPointOperands());
+    RUN_BINARY(testModArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testModArgImmFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testModImmArgFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testModImmsFloat, floatingPointOperands(), floatingPointOperands());
+
+    RUN_BINARY(testUModArgsInt32, int32Operands(), int32Operands());
+    RUN_BINARY(testUModArgsInt64, int64Operands(), int64Operands());
+
+    RUN(testSubArg(24));
+    RUN(testSubArgs(1, 1));
+    RUN(testSubArgs(1, 2));
+    RUN(testSubArgs(13, -42));
+    RUN(testSubArgs(-13, 42));
+    RUN(testSubArgImm(1, 1));
+    RUN(testSubArgImm(1, 2));
+    RUN(testSubArgImm(13, -42));
+    RUN(testSubArgImm(-13, 42));
+    RUN(testSubArgImm(42, 0));
+    RUN(testSubImmArg(1, 1));
+    RUN(testSubImmArg(1, 2));
+    RUN(testSubImmArg(13, -42));
+    RUN(testSubImmArg(-13, 42));
+    RUN_BINARY(testSubArgMem, int64Operands(), int64Operands());
+    RUN_BINARY(testSubMemArg, int64Operands(), int64Operands());
+    RUN_BINARY(testSubImmMem, int32Operands(), int32Operands());
+    RUN_BINARY(testSubMemImm, int32Operands(), int32Operands());
+    RUN_UNARY(testNegValueSubOne, int32Operands());
+
+    RUN(testSubArgs32(1, 1));
+    RUN(testSubArgs32(1, 2));
+    RUN(testSubArgs32(13, -42));
+    RUN(testSubArgs32(-13, 42));
+    RUN(testSubArgImm32(1, 1));
+    RUN(testSubArgImm32(1, 2));
+    RUN(testSubArgImm32(13, -42));
+    RUN(testSubArgImm32(-13, 42));
+    RUN(testSubImmArg32(1, 1));
+    RUN(testSubImmArg32(1, 2));
+    RUN(testSubImmArg32(13, -42));
+    RUN(testSubImmArg32(-13, 42));
+    RUN_BINARY(testSubArgMem32, int32Operands(), int32Operands());
+    RUN_BINARY(testSubMemArg32, int32Operands(), int32Operands());
+    RUN_BINARY(testSubImmMem32, int32Operands(), int32Operands());
+    RUN_BINARY(testSubMemImm32, int32Operands(), int32Operands());
+    RUN_UNARY(testNegValueSubOne32, int64Operands());
+
+    RUN_UNARY(testSubArgDouble, floatingPointOperands());
+    RUN_BINARY(testSubArgsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubArgImmDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubImmArgDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubImmsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testSubArgFloat, floatingPointOperands());
+    RUN_BINARY(testSubArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubArgImmFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubImmArgFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubImmsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testSubArgFloatWithUselessDoubleConversion, floatingPointOperands());
+    RUN_BINARY(testSubArgsFloatWithUselessDoubleConversion, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSubArgsFloatWithEffectfulDoubleConversion, floatingPointOperands(), floatingPointOperands());
+
+    RUN_UNARY(testNegDouble, floatingPointOperands());
+    RUN_UNARY(testNegFloat, floatingPointOperands());
+    RUN_UNARY(testNegFloatWithUselessDoubleConversion, floatingPointOperands());
+
+    RUN(testBitAndArgs(43, 43));
+    RUN(testBitAndArgs(43, 0));
+    RUN(testBitAndArgs(10, 3));
+    RUN(testBitAndArgs(42, 0xffffffffffffffff));
+    RUN(testBitAndSameArg(43));
+    RUN(testBitAndSameArg(0));
+    RUN(testBitAndSameArg(3));
+    RUN(testBitAndSameArg(0xffffffffffffffff));
+    RUN(testBitAndImms(43, 43));
+    RUN(testBitAndImms(43, 0));
+    RUN(testBitAndImms(10, 3));
+    RUN(testBitAndImms(42, 0xffffffffffffffff));
+    RUN(testBitAndArgImm(43, 43));
+    RUN(testBitAndArgImm(43, 0));
+    RUN(testBitAndArgImm(10, 3));
+    RUN(testBitAndArgImm(42, 0xffffffffffffffff));
+    RUN(testBitAndArgImm(42, 0xff));
+    RUN(testBitAndArgImm(300, 0xff));
+    RUN(testBitAndArgImm(-300, 0xff));
+    RUN(testBitAndArgImm(42, 0xffff));
+    RUN(testBitAndArgImm(40000, 0xffff));
+    RUN(testBitAndArgImm(-40000, 0xffff));
+    RUN(testBitAndImmArg(43, 43));
+    RUN(testBitAndImmArg(43, 0));
+    RUN(testBitAndImmArg(10, 3));
+    RUN(testBitAndImmArg(42, 0xffffffffffffffff));
+    RUN(testBitAndBitAndArgImmImm(2, 7, 3));
+    RUN(testBitAndBitAndArgImmImm(1, 6, 6));
+    RUN(testBitAndBitAndArgImmImm(0xffff, 24, 7));
+    RUN(testBitAndImmBitAndArgImm(7, 2, 3));
+    RUN(testBitAndImmBitAndArgImm(6, 1, 6));
+    RUN(testBitAndImmBitAndArgImm(24, 0xffff, 7));
+    RUN(testBitAndArgs32(43, 43));
+    RUN(testBitAndArgs32(43, 0));
+    RUN(testBitAndArgs32(10, 3));
+    RUN(testBitAndArgs32(42, 0xffffffff));
+    RUN(testBitAndSameArg32(43));
+    RUN(testBitAndSameArg32(0));
+    RUN(testBitAndSameArg32(3));
+    RUN(testBitAndSameArg32(0xffffffff));
+    RUN(testBitAndImms32(43, 43));
+    RUN(testBitAndImms32(43, 0));
+    RUN(testBitAndImms32(10, 3));
+    RUN(testBitAndImms32(42, 0xffffffff));
+    RUN(testBitAndArgImm32(43, 43));
+    RUN(testBitAndArgImm32(43, 0));
+    RUN(testBitAndArgImm32(10, 3));
+    RUN(testBitAndArgImm32(42, 0xffffffff));
+    RUN(testBitAndImmArg32(43, 43));
+    RUN(testBitAndImmArg32(43, 0));
+    RUN(testBitAndImmArg32(10, 3));
+    RUN(testBitAndImmArg32(42, 0xffffffff));
+    RUN(testBitAndImmArg32(42, 0xff));
+    RUN(testBitAndImmArg32(300, 0xff));
+    RUN(testBitAndImmArg32(-300, 0xff));
+    RUN(testBitAndImmArg32(42, 0xffff));
+    RUN(testBitAndImmArg32(40000, 0xffff));
+    RUN(testBitAndImmArg32(-40000, 0xffff));
+    RUN(testBitAndBitAndArgImmImm32(2, 7, 3));
+    RUN(testBitAndBitAndArgImmImm32(1, 6, 6));
+    RUN(testBitAndBitAndArgImmImm32(0xffff, 24, 7));
+    RUN(testBitAndImmBitAndArgImm32(7, 2, 3));
+    RUN(testBitAndImmBitAndArgImm32(6, 1, 6));
+    RUN(testBitAndImmBitAndArgImm32(24, 0xffff, 7));
+    RUN_BINARY(testBitAndWithMaskReturnsBooleans, int64Operands(), int64Operands());
+    RUN_UNARY(testBitAndArgDouble, floatingPointOperands());
+    RUN_BINARY(testBitAndArgsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBitAndArgImmDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBitAndImmsDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testBitAndArgFloat, floatingPointOperands());
+    RUN_BINARY(testBitAndArgsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBitAndArgImmFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBitAndImmsFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBitAndArgsFloatWithUselessDoubleConversion, floatingPointOperands(), floatingPointOperands());
+
+    RUN(testBitOrArgs(43, 43));
+    RUN(testBitOrArgs(43, 0));
+    RUN(testBitOrArgs(10, 3));
+    RUN(testBitOrArgs(42, 0xffffffffffffffff));
+    RUN(testBitOrSameArg(43));
+    RUN(testBitOrSameArg(0));
+    RUN(testBitOrSameArg(3));
+    RUN(testBitOrSameArg(0xffffffffffffffff));
+    RUN(testBitOrImms(43, 43));
+    RUN(testBitOrImms(43, 0));
+    RUN(testBitOrImms(10, 3));
+    RUN(testBitOrImms(42, 0xffffffffffffffff));
+    RUN(testBitOrArgImm(43, 43));
+    RUN(testBitOrArgImm(43, 0));
+    RUN(testBitOrArgImm(10, 3));
+    RUN(testBitOrArgImm(42, 0xffffffffffffffff));
+    RUN(testBitOrImmArg(43, 43));
+    RUN(testBitOrImmArg(43, 0));
+    RUN(testBitOrImmArg(10, 3));
+    RUN(testBitOrImmArg(42, 0xffffffffffffffff));
+    RUN(testBitOrBitOrArgImmImm(2, 7, 3));
+    RUN(testBitOrBitOrArgImmImm(1, 6, 6));
+    RUN(testBitOrBitOrArgImmImm(0xffff, 24, 7));
+    RUN(testBitOrImmBitOrArgImm(7, 2, 3));
+    RUN(testBitOrImmBitOrArgImm(6, 1, 6));
+    RUN(testBitOrImmBitOrArgImm(24, 0xffff, 7));
+    RUN(testBitOrArgs32(43, 43));
+    RUN(testBitOrArgs32(43, 0));
+    RUN(testBitOrArgs32(10, 3));
+    RUN(testBitOrArgs32(42, 0xffffffff));
+    RUN(testBitOrSameArg32(43));
+    RUN(testBitOrSameArg32(0));
+    RUN(testBitOrSameArg32(3));
+    RUN(testBitOrSameArg32(0xffffffff));
+    RUN(testBitOrImms32(43, 43));
+    RUN(testBitOrImms32(43, 0));
+    RUN(testBitOrImms32(10, 3));
+    RUN(testBitOrImms32(42, 0xffffffff));
+    RUN(testBitOrArgImm32(43, 43));
+    RUN(testBitOrArgImm32(43, 0));
+    RUN(testBitOrArgImm32(10, 3));
+    RUN(testBitOrArgImm32(42, 0xffffffff));
+    RUN(testBitOrImmArg32(43, 43));
+    RUN(testBitOrImmArg32(43, 0));
+    RUN(testBitOrImmArg32(10, 3));
+    RUN(testBitOrImmArg32(42, 0xffffffff));
+    RUN(testBitOrBitOrArgImmImm32(2, 7, 3));
+    RUN(testBitOrBitOrArgImmImm32(1, 6, 6));
+    RUN(testBitOrBitOrArgImmImm32(0xffff, 24, 7));
+    RUN(testBitOrImmBitOrArgImm32(7, 2, 3));
+    RUN(testBitOrImmBitOrArgImm32(6, 1, 6));
+    RUN(testBitOrImmBitOrArgImm32(24, 0xffff, 7));
+
+    RUN_BINARY(testBitXorArgs, int64Operands(), int64Operands());
+    RUN_UNARY(testBitXorSameArg, int64Operands());
+    RUN_BINARY(testBitXorImms, int64Operands(), int64Operands());
+    RUN_BINARY(testBitXorArgImm, int64Operands(), int64Operands());
+    RUN_BINARY(testBitXorImmArg, int64Operands(), int64Operands());
+    RUN(testBitXorBitXorArgImmImm(2, 7, 3));
+    RUN(testBitXorBitXorArgImmImm(1, 6, 6));
+    RUN(testBitXorBitXorArgImmImm(0xffff, 24, 7));
+    RUN(testBitXorImmBitXorArgImm(7, 2, 3));
+    RUN(testBitXorImmBitXorArgImm(6, 1, 6));
+    RUN(testBitXorImmBitXorArgImm(24, 0xffff, 7));
+    RUN(testBitXorArgs32(43, 43));
+    RUN(testBitXorArgs32(43, 0));
+    RUN(testBitXorArgs32(10, 3));
+    RUN(testBitXorArgs32(42, 0xffffffff));
+    RUN(testBitXorSameArg32(43));
+    RUN(testBitXorSameArg32(0));
+    RUN(testBitXorSameArg32(3));
+    RUN(testBitXorSameArg32(0xffffffff));
+    RUN(testBitXorImms32(43, 43));
+    RUN(testBitXorImms32(43, 0));
+    RUN(testBitXorImms32(10, 3));
+    RUN(testBitXorImms32(42, 0xffffffff));
+    RUN(testBitXorArgImm32(43, 43));
+    RUN(testBitXorArgImm32(43, 0));
+    RUN(testBitXorArgImm32(10, 3));
+    RUN(testBitXorArgImm32(42, 0xffffffff));
+    RUN(testBitXorImmArg32(43, 43));
+    RUN(testBitXorImmArg32(43, 0));
+    RUN(testBitXorImmArg32(10, 3));
+    RUN(testBitXorImmArg32(42, 0xffffffff));
+    RUN(testBitXorBitXorArgImmImm32(2, 7, 3));
+    RUN(testBitXorBitXorArgImmImm32(1, 6, 6));
+    RUN(testBitXorBitXorArgImmImm32(0xffff, 24, 7));
+    RUN(testBitXorImmBitXorArgImm32(7, 2, 3));
+    RUN(testBitXorImmBitXorArgImm32(6, 1, 6));
+    RUN(testBitXorImmBitXorArgImm32(24, 0xffff, 7));
+
+    RUN_UNARY(testBitNotArg, int64Operands());
+    RUN_UNARY(testBitNotImm, int64Operands());
+    RUN_UNARY(testBitNotMem, int64Operands());
+    RUN_UNARY(testBitNotArg32, int32Operands());
+    RUN_UNARY(testBitNotImm32, int32Operands());
+    RUN_UNARY(testBitNotMem32, int32Operands());
+    RUN_BINARY(testBitNotOnBooleanAndBranch32, int32Operands(), int32Operands());
+
+    RUN(testShlArgs(1, 0));
+    RUN(testShlArgs(1, 1));
+    RUN(testShlArgs(1, 62));
+    RUN(testShlArgs(0xffffffffffffffff, 0));
+    RUN(testShlArgs(0xffffffffffffffff, 1));
+    RUN(testShlArgs(0xffffffffffffffff, 63));
+    RUN(testShlImms(1, 0));
+    RUN(testShlImms(1, 1));
+    RUN(testShlImms(1, 62));
+    RUN(testShlImms(1, 65));
+    RUN(testShlImms(0xffffffffffffffff, 0));
+    RUN(testShlImms(0xffffffffffffffff, 1));
+    RUN(testShlImms(0xffffffffffffffff, 63));
+    RUN(testShlArgImm(1, 0));
+    RUN(testShlArgImm(1, 1));
+    RUN(testShlArgImm(1, 62));
+    RUN(testShlArgImm(1, 65));
+    RUN(testShlArgImm(0xffffffffffffffff, 0));
+    RUN(testShlArgImm(0xffffffffffffffff, 1));
+    RUN(testShlArgImm(0xffffffffffffffff, 63));
+    RUN(testShlArg32(2));
+    RUN(testShlArgs32(1, 0));
+    RUN(testShlArgs32(1, 1));
+    RUN(testShlArgs32(1, 62));
+    RUN(testShlImms32(1, 33));
+    RUN(testShlArgs32(0xffffffff, 0));
+    RUN(testShlArgs32(0xffffffff, 1));
+    RUN(testShlArgs32(0xffffffff, 63));
+    RUN(testShlImms32(1, 0));
+    RUN(testShlImms32(1, 1));
+    RUN(testShlImms32(1, 62));
+    RUN(testShlImms32(1, 33));
+    RUN(testShlImms32(0xffffffff, 0));
+    RUN(testShlImms32(0xffffffff, 1));
+    RUN(testShlImms32(0xffffffff, 63));
+    RUN(testShlArgImm32(1, 0));
+    RUN(testShlArgImm32(1, 1));
+    RUN(testShlArgImm32(1, 62));
+    RUN(testShlArgImm32(0xffffffff, 0));
+    RUN(testShlArgImm32(0xffffffff, 1));
+    RUN(testShlArgImm32(0xffffffff, 63));
+
+    RUN(testSShrArgs(1, 0));
+    RUN(testSShrArgs(1, 1));
+    RUN(testSShrArgs(1, 62));
+    RUN(testSShrArgs(0xffffffffffffffff, 0));
+    RUN(testSShrArgs(0xffffffffffffffff, 1));
+    RUN(testSShrArgs(0xffffffffffffffff, 63));
+    RUN(testSShrImms(1, 0));
+    RUN(testSShrImms(1, 1));
+    RUN(testSShrImms(1, 62));
+    RUN(testSShrImms(1, 65));
+    RUN(testSShrImms(0xffffffffffffffff, 0));
+    RUN(testSShrImms(0xffffffffffffffff, 1));
+    RUN(testSShrImms(0xffffffffffffffff, 63));
+    RUN(testSShrArgImm(1, 0));
+    RUN(testSShrArgImm(1, 1));
+    RUN(testSShrArgImm(1, 62));
+    RUN(testSShrArgImm(1, 65));
+    RUN(testSShrArgImm(0xffffffffffffffff, 0));
+    RUN(testSShrArgImm(0xffffffffffffffff, 1));
+    RUN(testSShrArgImm(0xffffffffffffffff, 63));
+    RUN(testSShrArg32(32));
+    RUN(testSShrArgs32(1, 0));
+    RUN(testSShrArgs32(1, 1));
+    RUN(testSShrArgs32(1, 62));
+    RUN(testSShrArgs32(1, 33));
+    RUN(testSShrArgs32(0xffffffff, 0));
+    RUN(testSShrArgs32(0xffffffff, 1));
+    RUN(testSShrArgs32(0xffffffff, 63));
+    RUN(testSShrImms32(1, 0));
+    RUN(testSShrImms32(1, 1));
+    RUN(testSShrImms32(1, 62));
+    RUN(testSShrImms32(1, 33));
+    RUN(testSShrImms32(0xffffffff, 0));
+    RUN(testSShrImms32(0xffffffff, 1));
+    RUN(testSShrImms32(0xffffffff, 63));
+    RUN(testSShrArgImm32(1, 0));
+    RUN(testSShrArgImm32(1, 1));
+    RUN(testSShrArgImm32(1, 62));
+    RUN(testSShrArgImm32(0xffffffff, 0));
+    RUN(testSShrArgImm32(0xffffffff, 1));
+    RUN(testSShrArgImm32(0xffffffff, 63));
+
+    RUN(testZShrArgs(1, 0));
+    RUN(testZShrArgs(1, 1));
+    RUN(testZShrArgs(1, 62));
+    RUN(testZShrArgs(0xffffffffffffffff, 0));
+    RUN(testZShrArgs(0xffffffffffffffff, 1));
+    RUN(testZShrArgs(0xffffffffffffffff, 63));
+    RUN(testZShrImms(1, 0));
+    RUN(testZShrImms(1, 1));
+    RUN(testZShrImms(1, 62));
+    RUN(testZShrImms(1, 65));
+    RUN(testZShrImms(0xffffffffffffffff, 0));
+    RUN(testZShrImms(0xffffffffffffffff, 1));
+    RUN(testZShrImms(0xffffffffffffffff, 63));
+    RUN(testZShrArgImm(1, 0));
+    RUN(testZShrArgImm(1, 1));
+    RUN(testZShrArgImm(1, 62));
+    RUN(testZShrArgImm(1, 65));
+    RUN(testZShrArgImm(0xffffffffffffffff, 0));
+    RUN(testZShrArgImm(0xffffffffffffffff, 1));
+    RUN(testZShrArgImm(0xffffffffffffffff, 63));
+    RUN(testZShrArg32(32));
+    RUN(testZShrArgs32(1, 0));
+    RUN(testZShrArgs32(1, 1));
+    RUN(testZShrArgs32(1, 62));
+    RUN(testZShrArgs32(1, 33));
+    RUN(testZShrArgs32(0xffffffff, 0));
+    RUN(testZShrArgs32(0xffffffff, 1));
+    RUN(testZShrArgs32(0xffffffff, 63));
+    RUN(testZShrImms32(1, 0));
+    RUN(testZShrImms32(1, 1));
+    RUN(testZShrImms32(1, 62));
+    RUN(testZShrImms32(1, 33));
+    RUN(testZShrImms32(0xffffffff, 0));
+    RUN(testZShrImms32(0xffffffff, 1));
+    RUN(testZShrImms32(0xffffffff, 63));
+    RUN(testZShrArgImm32(1, 0));
+    RUN(testZShrArgImm32(1, 1));
+    RUN(testZShrArgImm32(1, 62));
+    RUN(testZShrArgImm32(0xffffffff, 0));
+    RUN(testZShrArgImm32(0xffffffff, 1));
+    RUN(testZShrArgImm32(0xffffffff, 63));
+
+    RUN_UNARY(testClzArg64, int64Operands());
+    RUN_UNARY(testClzMem64, int64Operands());
+    RUN_UNARY(testClzArg32, int32Operands());
+    RUN_UNARY(testClzMem32, int32Operands());
+
+    RUN_UNARY(testAbsArg, floatingPointOperands());
+    RUN_UNARY(testAbsImm, floatingPointOperands());
+    RUN_UNARY(testAbsMem, floatingPointOperands());
+    RUN_UNARY(testAbsAbsArg, floatingPointOperands());
+    RUN_UNARY(testAbsBitwiseCastArg, floatingPointOperands());
+    RUN_UNARY(testBitwiseCastAbsBitwiseCastArg, floatingPointOperands());
+    RUN_UNARY(testAbsArg, floatingPointOperands());
+    RUN_UNARY(testAbsImm, floatingPointOperands());
+    RUN_UNARY(testAbsMem, floatingPointOperands());
+    RUN_UNARY(testAbsAbsArg, floatingPointOperands());
+    RUN_UNARY(testAbsBitwiseCastArg, floatingPointOperands());
+    RUN_UNARY(testBitwiseCastAbsBitwiseCastArg, floatingPointOperands());
+    RUN_UNARY(testAbsArgWithUselessDoubleConversion, floatingPointOperands());
+    RUN_UNARY(testAbsArgWithEffectfulDoubleConversion, floatingPointOperands());
+
+    RUN_UNARY(testCeilArg, floatingPointOperands());
+    RUN_UNARY(testCeilImm, floatingPointOperands());
+    RUN_UNARY(testCeilMem, floatingPointOperands());
+    RUN_UNARY(testCeilCeilArg, floatingPointOperands());
+    RUN_UNARY(testFloorCeilArg, floatingPointOperands());
+    RUN_UNARY(testCeilIToD64, int64Operands());
+    RUN_UNARY(testCeilIToD32, int32Operands());
+    RUN_UNARY(testCeilArg, floatingPointOperands());
+    RUN_UNARY(testCeilImm, floatingPointOperands());
+    RUN_UNARY(testCeilMem, floatingPointOperands());
+    RUN_UNARY(testCeilCeilArg, floatingPointOperands());
+    RUN_UNARY(testFloorCeilArg, floatingPointOperands());
+    RUN_UNARY(testCeilArgWithUselessDoubleConversion, floatingPointOperands());
+    RUN_UNARY(testCeilArgWithEffectfulDoubleConversion, floatingPointOperands());
+
+    RUN_UNARY(testFloorArg, floatingPointOperands());
+    RUN_UNARY(testFloorImm, floatingPointOperands());
+    RUN_UNARY(testFloorMem, floatingPointOperands());
+    RUN_UNARY(testFloorFloorArg, floatingPointOperands());
+    RUN_UNARY(testCeilFloorArg, floatingPointOperands());
+    RUN_UNARY(testFloorIToD64, int64Operands());
+    RUN_UNARY(testFloorIToD32, int32Operands());
+    RUN_UNARY(testFloorArg, floatingPointOperands());
+    RUN_UNARY(testFloorImm, floatingPointOperands());
+    RUN_UNARY(testFloorMem, floatingPointOperands());
+    RUN_UNARY(testFloorFloorArg, floatingPointOperands());
+    RUN_UNARY(testCeilFloorArg, floatingPointOperands());
+    RUN_UNARY(testFloorArgWithUselessDoubleConversion, floatingPointOperands());
+    RUN_UNARY(testFloorArgWithEffectfulDoubleConversion, floatingPointOperands());
+
+    RUN_UNARY(testSqrtArg, floatingPointOperands());
+    RUN_UNARY(testSqrtImm, floatingPointOperands());
+    RUN_UNARY(testSqrtMem, floatingPointOperands());
+    RUN_UNARY(testSqrtArg, floatingPointOperands());
+    RUN_UNARY(testSqrtImm, floatingPointOperands());
+    RUN_UNARY(testSqrtMem, floatingPointOperands());
+    RUN_UNARY(testSqrtArgWithUselessDoubleConversion, floatingPointOperands());
+    RUN_UNARY(testSqrtArgWithEffectfulDoubleConversion, floatingPointOperands());
+
+    RUN_BINARY(testCompareTwoFloatToDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testCompareOneFloatToDouble, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testCompareFloatToDoubleThroughPhi, floatingPointOperands(), floatingPointOperands());
+    RUN_UNARY(testDoubleToFloatThroughPhi, floatingPointOperands());
+    RUN(testReduceFloatToDoubleValidates());
+    RUN_UNARY(testDoubleProducerPhiToFloatConversion, floatingPointOperands());
+    RUN_UNARY(testDoubleProducerPhiToFloatConversionWithDoubleConsumer, floatingPointOperands());
+    RUN_BINARY(testDoubleProducerPhiWithNonFloatConst, floatingPointOperands(), floatingPointOperands());
+
+    RUN_UNARY(testDoubleArgToInt64BitwiseCast, floatingPointOperands());
+    RUN_UNARY(testDoubleImmToInt64BitwiseCast, floatingPointOperands());
+    RUN_UNARY(testTwoBitwiseCastOnDouble, floatingPointOperands());
+    RUN_UNARY(testBitwiseCastOnDoubleInMemory, floatingPointOperands());
+    RUN_UNARY(testBitwiseCastOnDoubleInMemoryIndexed, floatingPointOperands());
+    RUN_UNARY(testInt64BArgToDoubleBitwiseCast, int64Operands());
+    RUN_UNARY(testInt64BImmToDoubleBitwiseCast, int64Operands());
+    RUN_UNARY(testTwoBitwiseCastOnInt64, int64Operands());
+    RUN_UNARY(testBitwiseCastOnInt64InMemory, int64Operands());
+    RUN_UNARY(testBitwiseCastOnInt64InMemoryIndexed, int64Operands());
+    RUN_UNARY(testFloatImmToInt32BitwiseCast, floatingPointOperands());
+    RUN_UNARY(testBitwiseCastOnFloatInMemory, floatingPointOperands());
+    RUN_UNARY(testInt32BArgToFloatBitwiseCast, int32Operands());
+    RUN_UNARY(testInt32BImmToFloatBitwiseCast, int32Operands());
+    RUN_UNARY(testTwoBitwiseCastOnInt32, int32Operands());
+    RUN_UNARY(testBitwiseCastOnInt32InMemory, int32Operands());
+
+    RUN_UNARY(testConvertDoubleToFloatArg, floatingPointOperands());
+    RUN_UNARY(testConvertDoubleToFloatImm, floatingPointOperands());
+    RUN_UNARY(testConvertDoubleToFloatMem, floatingPointOperands());
+    RUN_UNARY(testConvertFloatToDoubleArg, floatingPointOperands());
+    RUN_UNARY(testConvertFloatToDoubleImm, floatingPointOperands());
+    RUN_UNARY(testConvertFloatToDoubleMem, floatingPointOperands());
+    RUN_UNARY(testConvertDoubleToFloatToDoubleToFloat, floatingPointOperands());
+    RUN_UNARY(testStoreFloat, floatingPointOperands());
+    RUN_UNARY(testStoreDoubleConstantAsFloat, floatingPointOperands());
+    RUN_UNARY(testLoadFloatConvertDoubleConvertFloatStoreFloat, floatingPointOperands());
+    RUN_UNARY(testFroundArg, floatingPointOperands());
+    RUN_UNARY(testFroundMem, floatingPointOperands());
+
+    RUN(testIToD64Arg());
+    RUN(testIToF64Arg());
+    RUN(testIToD32Arg());
+    RUN(testIToF32Arg());
+    RUN(testIToD64Mem());
+    RUN(testIToF64Mem());
+    RUN(testIToD32Mem());
+    RUN(testIToF32Mem());
+    RUN_UNARY(testIToD64Imm, int64Operands());
+    RUN_UNARY(testIToF64Imm, int64Operands());
+    RUN_UNARY(testIToD32Imm, int32Operands());
+    RUN_UNARY(testIToF32Imm, int32Operands());
+    RUN(testIToDReducedToIToF64Arg());
+    RUN(testIToDReducedToIToF32Arg());
+
+    RUN(testStore32(44));
+    RUN(testStoreConstant(49));
+    RUN(testStoreConstantPtr(49));
+    RUN(testStore8Arg());
+    RUN(testStore8Imm());
+    RUN(testStorePartial8BitRegisterOnX86());
+    RUN(testStore16Arg());
+    RUN(testStore16Imm());
+    RUN(testTrunc((static_cast<int64_t>(1) << 40) + 42));
+    RUN(testAdd1(45));
+    RUN(testAdd1Ptr(51));
+    RUN(testAdd1Ptr(bitwise_cast<intptr_t>(vm)));
+    RUN(testNeg32(52));
+    RUN(testNegPtr(53));
+    RUN(testStoreAddLoad32(46));
+    RUN(testStoreAddLoadImm32(46));
+    RUN(testStoreAddLoad64(4600));
+    RUN(testStoreAddLoadImm64(4600));
+    RUN(testStoreAddLoad8(4, Load8Z));
+    RUN(testStoreAddLoadImm8(4, Load8Z));
+    RUN(testStoreAddLoad8(4, Load8S));
+    RUN(testStoreAddLoadImm8(4, Load8S));
+    RUN(testStoreAddLoad16(6, Load16Z));
+    RUN(testStoreAddLoadImm16(6, Load16Z));
+    RUN(testStoreAddLoad16(6, Load16S));
+    RUN(testStoreAddLoadImm16(6, Load16S));
+    RUN(testStoreAddLoad32Index(46));
+    RUN(testStoreAddLoadImm32Index(46));
+    RUN(testStoreAddLoad64Index(4600));
+    RUN(testStoreAddLoadImm64Index(4600));
+    RUN(testStoreAddLoad8Index(4, Load8Z));
+    RUN(testStoreAddLoadImm8Index(4, Load8Z));
+    RUN(testStoreAddLoad8Index(4, Load8S));
+    RUN(testStoreAddLoadImm8Index(4, Load8S));
+    RUN(testStoreAddLoad16Index(6, Load16Z));
+    RUN(testStoreAddLoadImm16Index(6, Load16Z));
+    RUN(testStoreAddLoad16Index(6, Load16S));
+    RUN(testStoreAddLoadImm16Index(6, Load16S));
+    RUN(testStoreSubLoad(46));
+    RUN(testStoreAddLoadInterference(52));
+    RUN(testStoreAddAndLoad(47, 0xffff));
+    RUN(testStoreAddAndLoad(470000, 0xffff));
+    RUN(testStoreNegLoad32(54));
+    RUN(testStoreNegLoadPtr(55));
+    RUN(testAdd1Uncommuted(48));
+    RUN(testLoadOffset());
+    RUN(testLoadOffsetNotConstant());
+    RUN(testLoadOffsetUsingAdd());
+    RUN(testLoadOffsetUsingAddInterference());
+    RUN(testLoadOffsetUsingAddNotConstant());
+    RUN(testLoadAddrShift(0));
+    RUN(testLoadAddrShift(1));
+    RUN(testLoadAddrShift(2));
+    RUN(testLoadAddrShift(3));
+    RUN(testFramePointer());
+    RUN(testOverrideFramePointer());
+    RUN(testStackSlot());
+    RUN(testLoadFromFramePointer());
+    RUN(testStoreLoadStackSlot(50));
+    
+    RUN(testBranch());
+    RUN(testBranchPtr());
+    RUN(testDiamond());
+    RUN(testBranchNotEqual());
+    RUN(testBranchNotEqualCommute());
+    RUN(testBranchNotEqualNotEqual());
+    RUN(testBranchEqual());
+    RUN(testBranchEqualEqual());
+    RUN(testBranchEqualCommute());
+    RUN(testBranchEqualEqual1());
+    RUN_BINARY(testBranchEqualOrUnorderedArgs, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchEqualOrUnorderedArgs, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchEqualOrUnorderedDoubleArgImm, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchEqualOrUnorderedFloatArgImm, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchEqualOrUnorderedDoubleImms, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchEqualOrUnorderedFloatImms, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchEqualOrUnorderedFloatWithUselessDoubleConversion, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testBranchNotEqualAndOrderedArgs, floatingPointOperands(), floatingPointOperands());
+    RUN(testBranchFold(42));
+    RUN(testBranchFold(0));
+    RUN(testDiamondFold(42));
+    RUN(testDiamondFold(0));
+    RUN(testBranchNotEqualFoldPtr(42));
+    RUN(testBranchNotEqualFoldPtr(0));
+    RUN(testBranchEqualFoldPtr(42));
+    RUN(testBranchEqualFoldPtr(0));
+    RUN(testBranchLoadPtr());
+    RUN(testBranchLoad32());
+    RUN(testBranchLoad8S());
+    RUN(testBranchLoad8Z());
+    RUN(testBranchLoad16S());
+    RUN(testBranchLoad16Z());
+    RUN(testBranch8WithLoad8ZIndex());
+
+    RUN(testComplex(64, 128));
+    RUN(testComplex(4, 128));
+    RUN(testComplex(4, 256));
+    RUN(testComplex(4, 384));
+
+    RUN(testSimplePatchpoint());
+    RUN(testSimplePatchpointWithoutOuputClobbersGPArgs());
+    RUN(testSimplePatchpointWithOuputClobbersGPArgs());
+    RUN(testSimplePatchpointWithoutOuputClobbersFPArgs());
+    RUN(testSimplePatchpointWithOuputClobbersFPArgs());
+    RUN(testPatchpointWithEarlyClobber());
+    RUN(testPatchpointCallArg());
+    RUN(testPatchpointFixedRegister());
+    RUN(testPatchpointAny(ValueRep::WarmAny));
+    RUN(testPatchpointAny(ValueRep::ColdAny));
+    RUN(testPatchpointGPScratch());
+    RUN(testPatchpointFPScratch());
+    RUN(testPatchpointLotsOfLateAnys());
+    RUN(testPatchpointAnyImm(ValueRep::WarmAny));
+    RUN(testPatchpointAnyImm(ValueRep::ColdAny));
+    RUN(testPatchpointAnyImm(ValueRep::LateColdAny));
+    RUN(testPatchpointManyImms());
+    RUN(testPatchpointWithRegisterResult());
+    RUN(testPatchpointWithStackArgumentResult());
+    RUN(testPatchpointWithAnyResult());
+    RUN(testSimpleCheck());
+    RUN(testCheckFalse());
+    RUN(testCheckTrue());
+    RUN(testCheckLessThan());
+    RUN(testCheckMegaCombo());
+    RUN(testCheckTrickyMegaCombo());
+    RUN(testCheckTwoMegaCombos());
+    RUN(testCheckTwoNonRedundantMegaCombos());
+    RUN(testCheckAddImm());
+    RUN(testCheckAddImmCommute());
+    RUN(testCheckAddImmSomeRegister());
+    RUN(testCheckAdd());
+    RUN(testCheckAdd64());
+    RUN(testCheckAddFold(100, 200));
+    RUN(testCheckAddFoldFail(2147483647, 100));
+    RUN(testCheckAddArgumentAliasing64());
+    RUN(testCheckAddArgumentAliasing32());
+    RUN(testCheckAddSelfOverflow64());
+    RUN(testCheckAddSelfOverflow32());
+    RUN(testCheckSubImm());
+    RUN(testCheckSubBadImm());
+    RUN(testCheckSub());
+    RUN(testCheckSub64());
+    RUN(testCheckSubFold(100, 200));
+    RUN(testCheckSubFoldFail(-2147483647, 100));
+    RUN(testCheckNeg());
+    RUN(testCheckNeg64());
+    RUN(testCheckMul());
+    RUN(testCheckMulMemory());
+    RUN(testCheckMul2());
+    RUN(testCheckMul64());
+    RUN(testCheckMulFold(100, 200));
+    RUN(testCheckMulFoldFail(2147483647, 100));
+    RUN(testCheckMulArgumentAliasing64());
+    RUN(testCheckMulArgumentAliasing32());
+
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(Equal, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(NotEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(LessThan, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(GreaterThan, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(LessEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(GreaterEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(Below, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(Above, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(BelowEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(AboveEqual, a, b); }, int64Operands(), int64Operands());
+    RUN_BINARY([](int32_t a, int32_t b) { testCompare(BitAnd, a, b); }, int64Operands(), int64Operands());
+
+    RUN(testEqualDouble(42, 42, true));
+    RUN(testEqualDouble(0, -0, true));
+    RUN(testEqualDouble(42, 43, false));
+    RUN(testEqualDouble(PNaN, 42, false));
+    RUN(testEqualDouble(42, PNaN, false));
+    RUN(testEqualDouble(PNaN, PNaN, false));
+
+    RUN(testLoad(60));
+    RUN(testLoad(-60));
+    RUN(testLoad(1000));
+    RUN(testLoad(-1000));
+    RUN(testLoad(1000000));
+    RUN(testLoad(-1000000));
+    RUN(testLoad(1000000000));
+    RUN(testLoad(-1000000000));
+    RUN_UNARY(testLoad, int64Operands());
+    RUN_UNARY(testLoad, floatingPointOperands());
+    RUN_UNARY(testLoad, floatingPointOperands());
+    
+    RUN(testLoad(Load8S, 60));
+    RUN(testLoad(Load8S, -60));
+    RUN(testLoad(Load8S, 1000));
+    RUN(testLoad(Load8S, -1000));
+    RUN(testLoad(Load8S, 1000000));
+    RUN(testLoad(Load8S, -1000000));
+    RUN(testLoad(Load8S, 1000000000));
+    RUN(testLoad(Load8S, -1000000000));
+    
+    RUN(testLoad(Load8Z, 60));
+    RUN(testLoad(Load8Z, -60));
+    RUN(testLoad(Load8Z, 1000));
+    RUN(testLoad(Load8Z, -1000));
+    RUN(testLoad(Load8Z, 1000000));
+    RUN(testLoad(Load8Z, -1000000));
+    RUN(testLoad(Load8Z, 1000000000));
+    RUN(testLoad(Load8Z, -1000000000));
+
+    RUN(testLoad(Load16S, 60));
+    RUN(testLoad(Load16S, -60));
+    RUN(testLoad(Load16S, 1000));
+    RUN(testLoad(Load16S, -1000));
+    RUN(testLoad(Load16S, 1000000));
+    RUN(testLoad(Load16S, -1000000));
+    RUN(testLoad(Load16S, 1000000000));
+    RUN(testLoad(Load16S, -1000000000));
+    
+    RUN(testLoad(Load16Z, 60));
+    RUN(testLoad(Load16Z, -60));
+    RUN(testLoad(Load16Z, 1000));
+    RUN(testLoad(Load16Z, -1000));
+    RUN(testLoad(Load16Z, 1000000));
+    RUN(testLoad(Load16Z, -1000000));
+    RUN(testLoad(Load16Z, 1000000000));
+    RUN(testLoad(Load16Z, -1000000000));
+
+    RUN(testSpillGP());
+    RUN(testSpillFP());
+
+    RUN(testInt32ToDoublePartialRegisterStall());
+    RUN(testInt32ToDoublePartialRegisterWithoutStall());
+
+    RUN(testCallSimple(1, 2));
+    RUN(testCallRare(1, 2));
+    RUN(testCallRareLive(1, 2, 3));
+    RUN(testCallSimplePure(1, 2));
+    RUN(testCallFunctionWithHellaArguments());
+
+    RUN(testReturnDouble(0.0));
+    RUN(testReturnDouble(negativeZero()));
+    RUN(testReturnDouble(42.5));
+    RUN_UNARY(testReturnFloat, floatingPointOperands());
+
+    RUN(testCallSimpleDouble(1, 2));
+    RUN(testCallFunctionWithHellaDoubleArguments());
+    RUN_BINARY(testCallSimpleFloat, floatingPointOperands(), floatingPointOperands());
+    RUN(testCallFunctionWithHellaFloatArguments());
+
+    RUN(testChillDiv(4, 2, 2));
+    RUN(testChillDiv(1, 0, 0));
+    RUN(testChillDiv(0, 0, 0));
+    RUN(testChillDiv(1, -1, -1));
+    RUN(testChillDiv(-2147483647 - 1, 0, 0));
+    RUN(testChillDiv(-2147483647 - 1, 1, -2147483647 - 1));
+    RUN(testChillDiv(-2147483647 - 1, -1, -2147483647 - 1));
+    RUN(testChillDiv(-2147483647 - 1, 2, -1073741824));
+    RUN(testChillDiv64(4, 2, 2));
+    RUN(testChillDiv64(1, 0, 0));
+    RUN(testChillDiv64(0, 0, 0));
+    RUN(testChillDiv64(1, -1, -1));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, 0, 0));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, 1, -9223372036854775807ll - 1));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, -1, -9223372036854775807ll - 1));
+    RUN(testChillDiv64(-9223372036854775807ll - 1, 2, -4611686018427387904));
+    RUN(testChillDivTwice(4, 2, 6, 2, 5));
+    RUN(testChillDivTwice(4, 0, 6, 2, 3));
+    RUN(testChillDivTwice(4, 2, 6, 0, 2));
+
+    RUN_UNARY(testModArg, int64Operands());
+    RUN_BINARY(testModArgs, int64Operands(), int64Operands());
+    RUN_BINARY(testModImms, int64Operands(), int64Operands());
+    RUN_UNARY(testModArg32, int32Operands());
+    RUN_BINARY(testModArgs32, int32Operands(), int32Operands());
+    RUN_BINARY(testModImms32, int32Operands(), int32Operands());
+    RUN_UNARY(testChillModArg, int64Operands());
+    RUN_BINARY(testChillModArgs, int64Operands(), int64Operands());
+    RUN_BINARY(testChillModImms, int64Operands(), int64Operands());
+    RUN_UNARY(testChillModArg32, int32Operands());
+    RUN_BINARY(testChillModArgs32, int32Operands(), int32Operands());
+    RUN_BINARY(testChillModImms32, int32Operands(), int32Operands());
+
+    RUN(testSwitch(0, 1));
+    RUN(testSwitch(1, 1));
+    RUN(testSwitch(2, 1));
+    RUN(testSwitch(2, 2));
+    RUN(testSwitch(10, 1));
+    RUN(testSwitch(10, 2));
+    RUN(testSwitch(100, 1));
+    RUN(testSwitch(100, 100));
+
+    RUN(testSwitchChillDiv(0, 1));
+    RUN(testSwitchChillDiv(1, 1));
+    RUN(testSwitchChillDiv(2, 1));
+    RUN(testSwitchChillDiv(2, 2));
+    RUN(testSwitchChillDiv(10, 1));
+    RUN(testSwitchChillDiv(10, 2));
+    RUN(testSwitchChillDiv(100, 1));
+    RUN(testSwitchChillDiv(100, 100));
+
+    RUN(testSwitchTargettingSameBlock());
+    RUN(testSwitchTargettingSameBlockFoldPathConstant());
+
+    RUN(testTrunc(0));
+    RUN(testTrunc(1));
+    RUN(testTrunc(-1));
+    RUN(testTrunc(1000000000000ll));
+    RUN(testTrunc(-1000000000000ll));
+    RUN(testTruncFold(0));
+    RUN(testTruncFold(1));
+    RUN(testTruncFold(-1));
+    RUN(testTruncFold(1000000000000ll));
+    RUN(testTruncFold(-1000000000000ll));
+    
+    RUN(testZExt32(0));
+    RUN(testZExt32(1));
+    RUN(testZExt32(-1));
+    RUN(testZExt32(1000000000ll));
+    RUN(testZExt32(-1000000000ll));
+    RUN(testZExt32Fold(0));
+    RUN(testZExt32Fold(1));
+    RUN(testZExt32Fold(-1));
+    RUN(testZExt32Fold(1000000000ll));
+    RUN(testZExt32Fold(-1000000000ll));
+
+    RUN(testSExt32(0));
+    RUN(testSExt32(1));
+    RUN(testSExt32(-1));
+    RUN(testSExt32(1000000000ll));
+    RUN(testSExt32(-1000000000ll));
+    RUN(testSExt32Fold(0));
+    RUN(testSExt32Fold(1));
+    RUN(testSExt32Fold(-1));
+    RUN(testSExt32Fold(1000000000ll));
+    RUN(testSExt32Fold(-1000000000ll));
+
+    RUN(testTruncZExt32(0));
+    RUN(testTruncZExt32(1));
+    RUN(testTruncZExt32(-1));
+    RUN(testTruncZExt32(1000000000ll));
+    RUN(testTruncZExt32(-1000000000ll));
+    RUN(testTruncSExt32(0));
+    RUN(testTruncSExt32(1));
+    RUN(testTruncSExt32(-1));
+    RUN(testTruncSExt32(1000000000ll));
+    RUN(testTruncSExt32(-1000000000ll));
+
+    RUN(testSExt8(0));
+    RUN(testSExt8(1));
+    RUN(testSExt8(42));
+    RUN(testSExt8(-1));
+    RUN(testSExt8(0xff));
+    RUN(testSExt8(0x100));
+    RUN(testSExt8Fold(0));
+    RUN(testSExt8Fold(1));
+    RUN(testSExt8Fold(42));
+    RUN(testSExt8Fold(-1));
+    RUN(testSExt8Fold(0xff));
+    RUN(testSExt8Fold(0x100));
+    RUN(testSExt8SExt8(0));
+    RUN(testSExt8SExt8(1));
+    RUN(testSExt8SExt8(42));
+    RUN(testSExt8SExt8(-1));
+    RUN(testSExt8SExt8(0xff));
+    RUN(testSExt8SExt8(0x100));
+    RUN(testSExt8SExt16(0));
+    RUN(testSExt8SExt16(1));
+    RUN(testSExt8SExt16(42));
+    RUN(testSExt8SExt16(-1));
+    RUN(testSExt8SExt16(0xff));
+    RUN(testSExt8SExt16(0x100));
+    RUN(testSExt8SExt16(0xffff));
+    RUN(testSExt8SExt16(0x10000));
+    RUN(testSExt8BitAnd(0, 0));
+    RUN(testSExt8BitAnd(1, 0));
+    RUN(testSExt8BitAnd(42, 0));
+    RUN(testSExt8BitAnd(-1, 0));
+    RUN(testSExt8BitAnd(0xff, 0));
+    RUN(testSExt8BitAnd(0x100, 0));
+    RUN(testSExt8BitAnd(0xffff, 0));
+    RUN(testSExt8BitAnd(0x10000, 0));
+    RUN(testSExt8BitAnd(0, 0xf));
+    RUN(testSExt8BitAnd(1, 0xf));
+    RUN(testSExt8BitAnd(42, 0xf));
+    RUN(testSExt8BitAnd(-1, 0xf));
+    RUN(testSExt8BitAnd(0xff, 0xf));
+    RUN(testSExt8BitAnd(0x100, 0xf));
+    RUN(testSExt8BitAnd(0xffff, 0xf));
+    RUN(testSExt8BitAnd(0x10000, 0xf));
+    RUN(testSExt8BitAnd(0, 0xff));
+    RUN(testSExt8BitAnd(1, 0xff));
+    RUN(testSExt8BitAnd(42, 0xff));
+    RUN(testSExt8BitAnd(-1, 0xff));
+    RUN(testSExt8BitAnd(0xff, 0xff));
+    RUN(testSExt8BitAnd(0x100, 0xff));
+    RUN(testSExt8BitAnd(0xffff, 0xff));
+    RUN(testSExt8BitAnd(0x10000, 0xff));
+    RUN(testSExt8BitAnd(0, 0x80));
+    RUN(testSExt8BitAnd(1, 0x80));
+    RUN(testSExt8BitAnd(42, 0x80));
+    RUN(testSExt8BitAnd(-1, 0x80));
+    RUN(testSExt8BitAnd(0xff, 0x80));
+    RUN(testSExt8BitAnd(0x100, 0x80));
+    RUN(testSExt8BitAnd(0xffff, 0x80));
+    RUN(testSExt8BitAnd(0x10000, 0x80));
+    RUN(testBitAndSExt8(0, 0xf));
+    RUN(testBitAndSExt8(1, 0xf));
+    RUN(testBitAndSExt8(42, 0xf));
+    RUN(testBitAndSExt8(-1, 0xf));
+    RUN(testBitAndSExt8(0xff, 0xf));
+    RUN(testBitAndSExt8(0x100, 0xf));
+    RUN(testBitAndSExt8(0xffff, 0xf));
+    RUN(testBitAndSExt8(0x10000, 0xf));
+    RUN(testBitAndSExt8(0, 0xff));
+    RUN(testBitAndSExt8(1, 0xff));
+    RUN(testBitAndSExt8(42, 0xff));
+    RUN(testBitAndSExt8(-1, 0xff));
+    RUN(testBitAndSExt8(0xff, 0xff));
+    RUN(testBitAndSExt8(0x100, 0xff));
+    RUN(testBitAndSExt8(0xffff, 0xff));
+    RUN(testBitAndSExt8(0x10000, 0xff));
+    RUN(testBitAndSExt8(0, 0xfff));
+    RUN(testBitAndSExt8(1, 0xfff));
+    RUN(testBitAndSExt8(42, 0xfff));
+    RUN(testBitAndSExt8(-1, 0xfff));
+    RUN(testBitAndSExt8(0xff, 0xfff));
+    RUN(testBitAndSExt8(0x100, 0xfff));
+    RUN(testBitAndSExt8(0xffff, 0xfff));
+    RUN(testBitAndSExt8(0x10000, 0xfff));
+
+    RUN(testSExt16(0));
+    RUN(testSExt16(1));
+    RUN(testSExt16(42));
+    RUN(testSExt16(-1));
+    RUN(testSExt16(0xffff));
+    RUN(testSExt16(0x10000));
+    RUN(testSExt16Fold(0));
+    RUN(testSExt16Fold(1));
+    RUN(testSExt16Fold(42));
+    RUN(testSExt16Fold(-1));
+    RUN(testSExt16Fold(0xffff));
+    RUN(testSExt16Fold(0x10000));
+    RUN(testSExt16SExt8(0));
+    RUN(testSExt16SExt8(1));
+    RUN(testSExt16SExt8(42));
+    RUN(testSExt16SExt8(-1));
+    RUN(testSExt16SExt8(0xffff));
+    RUN(testSExt16SExt8(0x10000));
+    RUN(testSExt16SExt16(0));
+    RUN(testSExt16SExt16(1));
+    RUN(testSExt16SExt16(42));
+    RUN(testSExt16SExt16(-1));
+    RUN(testSExt16SExt16(0xffff));
+    RUN(testSExt16SExt16(0x10000));
+    RUN(testSExt16SExt16(0xffffff));
+    RUN(testSExt16SExt16(0x1000000));
+    RUN(testSExt16BitAnd(0, 0));
+    RUN(testSExt16BitAnd(1, 0));
+    RUN(testSExt16BitAnd(42, 0));
+    RUN(testSExt16BitAnd(-1, 0));
+    RUN(testSExt16BitAnd(0xffff, 0));
+    RUN(testSExt16BitAnd(0x10000, 0));
+    RUN(testSExt16BitAnd(0xffffff, 0));
+    RUN(testSExt16BitAnd(0x1000000, 0));
+    RUN(testSExt16BitAnd(0, 0xf));
+    RUN(testSExt16BitAnd(1, 0xf));
+    RUN(testSExt16BitAnd(42, 0xf));
+    RUN(testSExt16BitAnd(-1, 0xf));
+    RUN(testSExt16BitAnd(0xffff, 0xf));
+    RUN(testSExt16BitAnd(0x10000, 0xf));
+    RUN(testSExt16BitAnd(0xffffff, 0xf));
+    RUN(testSExt16BitAnd(0x1000000, 0xf));
+    RUN(testSExt16BitAnd(0, 0xffff));
+    RUN(testSExt16BitAnd(1, 0xffff));
+    RUN(testSExt16BitAnd(42, 0xffff));
+    RUN(testSExt16BitAnd(-1, 0xffff));
+    RUN(testSExt16BitAnd(0xffff, 0xffff));
+    RUN(testSExt16BitAnd(0x10000, 0xffff));
+    RUN(testSExt16BitAnd(0xffffff, 0xffff));
+    RUN(testSExt16BitAnd(0x1000000, 0xffff));
+    RUN(testSExt16BitAnd(0, 0x8000));
+    RUN(testSExt16BitAnd(1, 0x8000));
+    RUN(testSExt16BitAnd(42, 0x8000));
+    RUN(testSExt16BitAnd(-1, 0x8000));
+    RUN(testSExt16BitAnd(0xffff, 0x8000));
+    RUN(testSExt16BitAnd(0x10000, 0x8000));
+    RUN(testSExt16BitAnd(0xffffff, 0x8000));
+    RUN(testSExt16BitAnd(0x1000000, 0x8000));
+    RUN(testBitAndSExt16(0, 0xf));
+    RUN(testBitAndSExt16(1, 0xf));
+    RUN(testBitAndSExt16(42, 0xf));
+    RUN(testBitAndSExt16(-1, 0xf));
+    RUN(testBitAndSExt16(0xffff, 0xf));
+    RUN(testBitAndSExt16(0x10000, 0xf));
+    RUN(testBitAndSExt16(0xffffff, 0xf));
+    RUN(testBitAndSExt16(0x1000000, 0xf));
+    RUN(testBitAndSExt16(0, 0xffff));
+    RUN(testBitAndSExt16(1, 0xffff));
+    RUN(testBitAndSExt16(42, 0xffff));
+    RUN(testBitAndSExt16(-1, 0xffff));
+    RUN(testBitAndSExt16(0xffff, 0xffff));
+    RUN(testBitAndSExt16(0x10000, 0xffff));
+    RUN(testBitAndSExt16(0xffffff, 0xffff));
+    RUN(testBitAndSExt16(0x1000000, 0xffff));
+    RUN(testBitAndSExt16(0, 0xfffff));
+    RUN(testBitAndSExt16(1, 0xfffff));
+    RUN(testBitAndSExt16(42, 0xfffff));
+    RUN(testBitAndSExt16(-1, 0xfffff));
+    RUN(testBitAndSExt16(0xffff, 0xfffff));
+    RUN(testBitAndSExt16(0x10000, 0xfffff));
+    RUN(testBitAndSExt16(0xffffff, 0xfffff));
+    RUN(testBitAndSExt16(0x1000000, 0xfffff));
+
+    RUN(testSExt32BitAnd(0, 0));
+    RUN(testSExt32BitAnd(1, 0));
+    RUN(testSExt32BitAnd(42, 0));
+    RUN(testSExt32BitAnd(-1, 0));
+    RUN(testSExt32BitAnd(0x80000000, 0));
+    RUN(testSExt32BitAnd(0, 0xf));
+    RUN(testSExt32BitAnd(1, 0xf));
+    RUN(testSExt32BitAnd(42, 0xf));
+    RUN(testSExt32BitAnd(-1, 0xf));
+    RUN(testSExt32BitAnd(0x80000000, 0xf));
+    RUN(testSExt32BitAnd(0, 0x80000000));
+    RUN(testSExt32BitAnd(1, 0x80000000));
+    RUN(testSExt32BitAnd(42, 0x80000000));
+    RUN(testSExt32BitAnd(-1, 0x80000000));
+    RUN(testSExt32BitAnd(0x80000000, 0x80000000));
+    RUN(testBitAndSExt32(0, 0xf));
+    RUN(testBitAndSExt32(1, 0xf));
+    RUN(testBitAndSExt32(42, 0xf));
+    RUN(testBitAndSExt32(-1, 0xf));
+    RUN(testBitAndSExt32(0xffff, 0xf));
+    RUN(testBitAndSExt32(0x10000, 0xf));
+    RUN(testBitAndSExt32(0xffffff, 0xf));
+    RUN(testBitAndSExt32(0x1000000, 0xf));
+    RUN(testBitAndSExt32(0, 0xffff00000000llu));
+    RUN(testBitAndSExt32(1, 0xffff00000000llu));
+    RUN(testBitAndSExt32(42, 0xffff00000000llu));
+    RUN(testBitAndSExt32(-1, 0xffff00000000llu));
+    RUN(testBitAndSExt32(0x80000000, 0xffff00000000llu));
+
+    RUN(testBasicSelect());
+    RUN(testSelectTest());
+    RUN(testSelectCompareDouble());
+    RUN_BINARY(testSelectCompareFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSelectCompareFloatToDouble, floatingPointOperands(), floatingPointOperands());
+    RUN(testSelectDouble());
+    RUN(testSelectDoubleTest());
+    RUN(testSelectDoubleCompareDouble());
+    RUN_BINARY(testSelectDoubleCompareFloat, floatingPointOperands(), floatingPointOperands());
+    RUN_BINARY(testSelectFloatCompareFloat, floatingPointOperands(), floatingPointOperands());
+    RUN(testSelectDoubleCompareDoubleWithAliasing());
+    RUN(testSelectFloatCompareFloatWithAliasing());
+    RUN(testSelectFold(42));
+    RUN(testSelectFold(43));
+    RUN(testSelectInvert());
+    RUN(testCheckSelect());
+    RUN(testCheckSelectCheckSelect());
+    RUN_BINARY(testPowDoubleByIntegerLoop, floatingPointOperands(), int64Operands());
+
+    RUN(testTruncOrHigh());
+    RUN(testTruncOrLow());
+    RUN(testBitAndOrHigh());
+    RUN(testBitAndOrLow());
+
+    RUN(testBranch64Equal(0, 0));
+    RUN(testBranch64Equal(1, 1));
+    RUN(testBranch64Equal(-1, -1));
+    RUN(testBranch64Equal(1, -1));
+    RUN(testBranch64Equal(-1, 1));
+    RUN(testBranch64EqualImm(0, 0));
+    RUN(testBranch64EqualImm(1, 1));
+    RUN(testBranch64EqualImm(-1, -1));
+    RUN(testBranch64EqualImm(1, -1));
+    RUN(testBranch64EqualImm(-1, 1));
+    RUN(testBranch64EqualMem(0, 0));
+    RUN(testBranch64EqualMem(1, 1));
+    RUN(testBranch64EqualMem(-1, -1));
+    RUN(testBranch64EqualMem(1, -1));
+    RUN(testBranch64EqualMem(-1, 1));
+    RUN(testBranch64EqualMemImm(0, 0));
+    RUN(testBranch64EqualMemImm(1, 1));
+    RUN(testBranch64EqualMemImm(-1, -1));
+    RUN(testBranch64EqualMemImm(1, -1));
+    RUN(testBranch64EqualMemImm(-1, 1));
+
+    RUN(testStore8Load8Z(0));
+    RUN(testStore8Load8Z(123));
+    RUN(testStore8Load8Z(12345));
+    RUN(testStore8Load8Z(-123));
+
+    RUN(testStore16Load16Z(0));
+    RUN(testStore16Load16Z(123));
+    RUN(testStore16Load16Z(12345));
+    RUN(testStore16Load16Z(12345678));
+    RUN(testStore16Load16Z(-123));
+
+    RUN(testSShrShl32(42, 24, 24));
+    RUN(testSShrShl32(-42, 24, 24));
+    RUN(testSShrShl32(4200, 24, 24));
+    RUN(testSShrShl32(-4200, 24, 24));
+    RUN(testSShrShl32(4200000, 24, 24));
+    RUN(testSShrShl32(-4200000, 24, 24));
+
+    RUN(testSShrShl32(42, 16, 16));
+    RUN(testSShrShl32(-42, 16, 16));
+    RUN(testSShrShl32(4200, 16, 16));
+    RUN(testSShrShl32(-4200, 16, 16));
+    RUN(testSShrShl32(4200000, 16, 16));
+    RUN(testSShrShl32(-4200000, 16, 16));
+
+    RUN(testSShrShl32(42, 8, 8));
+    RUN(testSShrShl32(-42, 8, 8));
+    RUN(testSShrShl32(4200, 8, 8));
+    RUN(testSShrShl32(-4200, 8, 8));
+    RUN(testSShrShl32(4200000, 8, 8));
+    RUN(testSShrShl32(-4200000, 8, 8));
+    RUN(testSShrShl32(420000000, 8, 8));
+    RUN(testSShrShl32(-420000000, 8, 8));
+
+    RUN(testSShrShl64(42, 56, 56));
+    RUN(testSShrShl64(-42, 56, 56));
+    RUN(testSShrShl64(4200, 56, 56));
+    RUN(testSShrShl64(-4200, 56, 56));
+    RUN(testSShrShl64(4200000, 56, 56));
+    RUN(testSShrShl64(-4200000, 56, 56));
+    RUN(testSShrShl64(420000000, 56, 56));
+    RUN(testSShrShl64(-420000000, 56, 56));
+    RUN(testSShrShl64(42000000000, 56, 56));
+    RUN(testSShrShl64(-42000000000, 56, 56));
+
+    RUN(testSShrShl64(42, 48, 48));
+    RUN(testSShrShl64(-42, 48, 48));
+    RUN(testSShrShl64(4200, 48, 48));
+    RUN(testSShrShl64(-4200, 48, 48));
+    RUN(testSShrShl64(4200000, 48, 48));
+    RUN(testSShrShl64(-4200000, 48, 48));
+    RUN(testSShrShl64(420000000, 48, 48));
+    RUN(testSShrShl64(-420000000, 48, 48));
+    RUN(testSShrShl64(42000000000, 48, 48));
+    RUN(testSShrShl64(-42000000000, 48, 48));
+
+    RUN(testSShrShl64(42, 32, 32));
+    RUN(testSShrShl64(-42, 32, 32));
+    RUN(testSShrShl64(4200, 32, 32));
+    RUN(testSShrShl64(-4200, 32, 32));
+    RUN(testSShrShl64(4200000, 32, 32));
+    RUN(testSShrShl64(-4200000, 32, 32));
+    RUN(testSShrShl64(420000000, 32, 32));
+    RUN(testSShrShl64(-420000000, 32, 32));
+    RUN(testSShrShl64(42000000000, 32, 32));
+    RUN(testSShrShl64(-42000000000, 32, 32));
+
+    RUN(testSShrShl64(42, 24, 24));
+    RUN(testSShrShl64(-42, 24, 24));
+    RUN(testSShrShl64(4200, 24, 24));
+    RUN(testSShrShl64(-4200, 24, 24));
+    RUN(testSShrShl64(4200000, 24, 24));
+    RUN(testSShrShl64(-4200000, 24, 24));
+    RUN(testSShrShl64(420000000, 24, 24));
+    RUN(testSShrShl64(-420000000, 24, 24));
+    RUN(testSShrShl64(42000000000, 24, 24));
+    RUN(testSShrShl64(-42000000000, 24, 24));
+
+    RUN(testSShrShl64(42, 16, 16));
+    RUN(testSShrShl64(-42, 16, 16));
+    RUN(testSShrShl64(4200, 16, 16));
+    RUN(testSShrShl64(-4200, 16, 16));
+    RUN(testSShrShl64(4200000, 16, 16));
+    RUN(testSShrShl64(-4200000, 16, 16));
+    RUN(testSShrShl64(420000000, 16, 16));
+    RUN(testSShrShl64(-420000000, 16, 16));
+    RUN(testSShrShl64(42000000000, 16, 16));
+    RUN(testSShrShl64(-42000000000, 16, 16));
+
+    RUN(testSShrShl64(42, 8, 8));
+    RUN(testSShrShl64(-42, 8, 8));
+    RUN(testSShrShl64(4200, 8, 8));
+    RUN(testSShrShl64(-4200, 8, 8));
+    RUN(testSShrShl64(4200000, 8, 8));
+    RUN(testSShrShl64(-4200000, 8, 8));
+    RUN(testSShrShl64(420000000, 8, 8));
+    RUN(testSShrShl64(-420000000, 8, 8));
+    RUN(testSShrShl64(42000000000, 8, 8));
+    RUN(testSShrShl64(-42000000000, 8, 8));
+
+    RUN(testCheckMul64SShr());
+
+    RUN_BINARY(testRotR, int32Operands(), int32Operands());
+    RUN_BINARY(testRotR, int64Operands(), int32Operands());
+    RUN_BINARY(testRotL, int32Operands(), int32Operands());
+    RUN_BINARY(testRotL, int64Operands(), int32Operands());
+
+    RUN_BINARY(testRotRWithImmShift, int32Operands(), int32Operands());
+    RUN_BINARY(testRotRWithImmShift, int64Operands(), int32Operands());
+    RUN_BINARY(testRotLWithImmShift, int32Operands(), int32Operands());
+    RUN_BINARY(testRotLWithImmShift, int64Operands(), int32Operands());
+
+    RUN(testComputeDivisionMagic(2, -2147483647, 0));
+    RUN(testTrivialInfiniteLoop());
+    RUN(testFoldPathEqual());
+    
+    RUN(testRShiftSelf32());
+    RUN(testURShiftSelf32());
+    RUN(testLShiftSelf32());
+    RUN(testRShiftSelf64());
+    RUN(testURShiftSelf64());
+    RUN(testLShiftSelf64());
+
+    RUN(testPatchpointDoubleRegs());
+    RUN(testSpillDefSmallerThanUse());
+    RUN(testSpillUseLargerThanDef());
+    RUN(testLateRegister());
+    RUN(testInterpreter());
+    RUN(testReduceStrengthCheckBottomUseInAnotherBlock());
+    RUN(testResetReachabilityDanglingReference());
+    
+    RUN(testEntrySwitchSimple());
+    RUN(testEntrySwitchNoEntrySwitch());
+    RUN(testEntrySwitchWithCommonPaths());
+    RUN(testEntrySwitchWithCommonPathsAndNonTrivialEntrypoint());
+    RUN(testEntrySwitchLoop());
+
+    RUN(testSomeEarlyRegister());
+    RUN(testPatchpointTerminalReturnValue(true));
+    RUN(testPatchpointTerminalReturnValue(false));
+    RUN(testTerminalPatchpointThatNeedsToBeSpilled());
+
+    RUN(testMemoryFence());
+    RUN(testStoreFence());
+    RUN(testLoadFence());
+    RUN(testTrappingLoad());
+    RUN(testTrappingStore());
+    RUN(testTrappingLoadAddStore());
+    RUN(testTrappingLoadDCE());
+    RUN(testTrappingStoreElimination());
+    RUN(testMoveConstants());
+    RUN(testPCOriginMapDoesntInsertNops());
+    RUN(testPinRegisters());
+    RUN(testReduceStrengthReassociation(true));
+    RUN(testReduceStrengthReassociation(false));
+    RUN(testAddShl32());
+    RUN(testAddShl64());
+    RUN(testAddShl65());
+    RUN(testLoadBaseIndexShift2());
+    RUN(testLoadBaseIndexShift32());
+    RUN(testOptimizeMaterialization());
+
+    RUN(testWasmBoundsCheck(0));
+    RUN(testWasmBoundsCheck(100));
+    RUN(testWasmBoundsCheck(10000));
+    RUN(testWasmBoundsCheck(std::numeric_limits<unsigned>::max() - 5));
+    RUN(testWasmAddress());
+
+    if (isX86()) {
+        RUN(testBranchBitAndImmFusion(Identity, Int64, 1, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Identity, Int64, 0xff, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Trunc, Int32, 1, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Trunc, Int32, 0xff, Air::BranchTest32, Air::Arg::Tmp));
+        RUN(testBranchBitAndImmFusion(Load8S, Int32, 1, Air::BranchTest8, Air::Arg::Addr));
+        RUN(testBranchBitAndImmFusion(Load8Z, Int32, 1, Air::BranchTest8, Air::Arg::Addr));
+        RUN(testBranchBitAndImmFusion(Load, Int32, 1, Air::BranchTest32, Air::Arg::Addr));
+        RUN(testBranchBitAndImmFusion(Load, Int64, 1, Air::BranchTest32, Air::Arg::Addr));
+        RUN(testX86LeaAddAddShlLeft());
+        RUN(testX86LeaAddAddShlRight());
+        RUN(testX86LeaAddAdd());
+        RUN(testX86LeaAddShlRight());
+        RUN(testX86LeaAddShlLeftScale1());
+        RUN(testX86LeaAddShlLeftScale2());
+        RUN(testX86LeaAddShlLeftScale4());
+        RUN(testX86LeaAddShlLeftScale8());
+    }
+
+    if (isARM64()) {
+        RUN(testTernarySubInstructionSelection(Identity, Int64, Air::Sub64));
+        RUN(testTernarySubInstructionSelection(Trunc, Int32, Air::Sub32));
+    }
+
+    if (tasks.isEmpty())
+        usage();
+
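+    // Drain the queued tests with a simple worker pool: each thread repeatedly
+    // takes the next task from the shared deque under the lock and runs it,
+    // returning once the deque is empty.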
+    Lock lock;
+
+    Vector<ThreadIdentifier> threads;
+    for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
+        threads.append(
+            createThread(
+                "testb3 thread",
+                [&] () {
+                    for (;;) {
+                        RefPtr<SharedTask<void()>> task;
+                        {
+                            LockHolder locker(lock);
+                            if (tasks.isEmpty())
+                                return;
+                            task = tasks.takeFirst();
+                        }
+
+                        task->run();
+                    }
+                }));
+    }
+
+    for (ThreadIdentifier thread : threads)
+        waitForThreadCompletion(thread);
+    crashLock.lock();
+}
+
+} // anonymous namespace
+
+#else // ENABLE(B3_JIT)
+
+static void run(const char*)
+{
+    dataLog("B3 JIT is not enabled.\n");
+}
+
+#endif // ENABLE(B3_JIT)
+
+int main(int argc, char** argv)
+{
+    const char* filter = nullptr;
+    switch (argc) {
+    case 1:
+        break;
+    case 2:
+        filter = argv[1];
+        break;
+    default:
+        usage();
+        break;
+    }
+    
+    run(filter);
+    return 0;
+}
+
diff --git a/bindings/ScriptFunctionCall.cpp b/bindings/ScriptFunctionCall.cpp
new file mode 100644
index 0000000..b649c28
--- /dev/null
+++ b/bindings/ScriptFunctionCall.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ScriptFunctionCall.h"
+
+#include "JSCInlines.h"
+#include "JSLock.h"
+#include "ScriptValue.h"
+#include 
+
+using namespace JSC;
+
+namespace Deprecated {
+
+void ScriptCallArgumentHandler::appendArgument(const String& argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsString(m_exec, argument));
+}
+
+void ScriptCallArgumentHandler::appendArgument(const char* argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsString(m_exec, String(argument)));
+}
+
+void ScriptCallArgumentHandler::appendArgument(JSValue argument)
+{
+    m_arguments.append(argument);
+}
+
+void ScriptCallArgumentHandler::appendArgument(long argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsNumber(argument));
+}
+
+void ScriptCallArgumentHandler::appendArgument(long long argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsNumber(argument));
+}
+
+void ScriptCallArgumentHandler::appendArgument(unsigned int argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsNumber(argument));
+}
+
+void ScriptCallArgumentHandler::appendArgument(unsigned long argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsNumber(argument));
+}
+
+void ScriptCallArgumentHandler::appendArgument(int argument)
+{
+    JSLockHolder lock(m_exec);
+    m_arguments.append(jsNumber(argument));
+}
+
+void ScriptCallArgumentHandler::appendArgument(bool argument)
+{
+    m_arguments.append(jsBoolean(argument));
+}
+
+ScriptFunctionCall::ScriptFunctionCall(const Deprecated::ScriptObject& thisObject, const String& name, ScriptFunctionCallHandler callHandler)
+    : ScriptCallArgumentHandler(thisObject.scriptState())
+    , m_callHandler(callHandler)
+    , m_thisObject(thisObject)
+    , m_name(name)
+{
+}
+
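+// Looks up m_name on the bound this-object and invokes it with the collected
+// arguments, going through m_callHandler when one was supplied. Exceptions are
+// reported through hadException, except that a terminated execution is treated
+// as an empty result rather than an error.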
+JSValue ScriptFunctionCall::call(bool& hadException)
+{
+    JSObject* thisObject = m_thisObject.jsObject();
+
+    VM& vm = m_exec->vm();
+    JSLockHolder lock(vm);
+    auto scope = DECLARE_THROW_SCOPE(vm);
+
+    JSValue function = thisObject->get(m_exec, Identifier::fromString(m_exec, m_name));
+    if (UNLIKELY(scope.exception())) {
+        hadException = true;
+        return { };
+    }
+
+    CallData callData;
+    CallType callType = getCallData(function, callData);
+    if (callType == CallType::None)
+        return { };
+
+    JSValue result;
+    NakedPtr<Exception> exception;
+    if (m_callHandler)
+        result = m_callHandler(m_exec, function, callType, callData, thisObject, m_arguments, exception);
+    else
+        result = JSC::call(m_exec, function, callType, callData, thisObject, m_arguments, exception);
+
+    if (exception) {
+        // Do not treat a terminated execution exception as having an exception. Just treat it as an empty result.
+        hadException = !isTerminatedExecutionException(exception);
+        return { };
+    }
+
+    return result;
+}
+
+JSC::JSValue ScriptFunctionCall::call()
+{
+    bool hadException = false;
+    return call(hadException);
+}
+
+} // namespace Deprecated
diff --git a/bindings/ScriptFunctionCall.h b/bindings/ScriptFunctionCall.h
new file mode 100644
index 0000000..6978414
--- /dev/null
+++ b/bindings/ScriptFunctionCall.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ArgList.h"
+#include "ScriptObject.h"
+#include 
+
+namespace JSC {
+class JSValue;
+}
+
+namespace Deprecated {
+
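+// Collects call arguments into a MarkedArgumentBuffer for a later invocation.
+// Because MarkedArgumentBuffer must live on the stack, heap allocation of
+// these handler objects is deliberately disallowed below.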
+class JS_EXPORT_PRIVATE ScriptCallArgumentHandler {
+public:
+    ScriptCallArgumentHandler(JSC::ExecState* state) : m_exec(state) { }
+
+    void appendArgument(const char*);
+    void appendArgument(const String&);
+    void appendArgument(JSC::JSValue);
+    void appendArgument(long);
+    void appendArgument(long long);
+    void appendArgument(unsigned int);
+    void appendArgument(unsigned long);
+    void appendArgument(int);
+    void appendArgument(bool);
+
+protected:
+    JSC::MarkedArgumentBuffer m_arguments;
+    JSC::ExecState* m_exec;
+
+private:
+    // MarkedArgumentBuffer must be stack allocated, so prevent heap
+    // alloc of ScriptFunctionCall as well.
+    void* operator new(size_t) { ASSERT_NOT_REACHED(); return reinterpret_cast<void*>(0xbadbeef); }
+    void* operator new[](size_t) { ASSERT_NOT_REACHED(); return reinterpret_cast<void*>(0xbadbeef); }
+};
+
+class JS_EXPORT_PRIVATE ScriptFunctionCall : public ScriptCallArgumentHandler {
+public:
+    typedef JSC::JSValue (*ScriptFunctionCallHandler)(JSC::ExecState* exec, JSC::JSValue functionObject, JSC::CallType callType, const JSC::CallData& callData, JSC::JSValue thisValue, const JSC::ArgList& args, NakedPtr<JSC::Exception>&);
+    ScriptFunctionCall(const ScriptObject& thisObject, const String& name, ScriptFunctionCallHandler handler = nullptr);
+    JSC::JSValue call(bool& hadException);
+    JSC::JSValue call();
+
+protected:
+    ScriptFunctionCallHandler m_callHandler;
+    ScriptObject m_thisObject;
+    String m_name;
+};
+
+} // namespace Deprecated
diff --git a/bindings/ScriptObject.cpp b/bindings/ScriptObject.cpp
new file mode 100644
index 0000000..70422e2
--- /dev/null
+++ b/bindings/ScriptObject.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ScriptObject.h"
+
+#include "JSCInlines.h"
+
+using namespace JSC;
+
+namespace Deprecated {
+
+ScriptObject::ScriptObject(ExecState* scriptState, JSObject* object)
+    : ScriptValue(scriptState->vm(), object)
+    , m_scriptState(scriptState)
+{
+}
+
+ScriptObject::ScriptObject(ExecState* scriptState, const ScriptValue& scriptValue)
+    : ScriptValue(scriptState->vm(), scriptValue.jsValue())
+    , m_scriptState(scriptState)
+{
+}
+
+} // namespace Deprecated
diff --git a/bindings/ScriptObject.h b/bindings/ScriptObject.h
new file mode 100644
index 0000000..baa1ea8
--- /dev/null
+++ b/bindings/ScriptObject.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include "ScriptValue.h"
+
+namespace Deprecated {
+
+class ScriptObject : public ScriptValue {
+public:
+    JS_EXPORT_PRIVATE ScriptObject(JSC::ExecState*, JSC::JSObject*);
+    JS_EXPORT_PRIVATE ScriptObject(JSC::ExecState*, const ScriptValue&);
+    ScriptObject() { }
+
+    operator JSC::JSObject*() const { return jsObject(); }
+
+    JSC::JSObject* jsObject() const { return asObject(jsValue()); }
+    JSC::ExecState* scriptState() const { return m_scriptState; }
+
+private:
+    JSC::ExecState* m_scriptState { nullptr };
+};
+
+} // namespace Deprecated
diff --git a/bindings/ScriptValue.cpp b/bindings/ScriptValue.cpp
new file mode 100644
index 0000000..2a94f3b
--- /dev/null
+++ b/bindings/ScriptValue.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ScriptValue.h"
+
+#include "APICast.h"
+#include "InspectorValues.h"
+#include "JSCInlines.h"
+#include "JSLock.h"
+
+using namespace JSC;
+using namespace Inspector;
+
+namespace Inspector {
+
+static RefPtr<InspectorValue> jsToInspectorValue(ExecState& scriptState, JSValue value, int maxDepth)
+{
+    if (!value) {
+        ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+
+    if (!maxDepth)
+        return nullptr;
+
+    maxDepth--;
+
+    if (value.isUndefinedOrNull())
+        return InspectorValue::null();
+    if (value.isBoolean())
+        return InspectorValue::create(value.asBoolean());
+    if (value.isNumber() && value.isDouble())
+        return InspectorValue::create(value.asNumber());
+    if (value.isNumber() && value.isAnyInt())
+        return InspectorValue::create(static_cast<int>(value.asAnyInt()));
+    if (value.isString())
+        return InspectorValue::create(asString(value)->value(&scriptState));
+
+    if (value.isObject()) {
+        if (isJSArray(value)) {
+            auto inspectorArray = InspectorArray::create();
+            auto& array = *asArray(value);
+            unsigned length = array.length();
+            for (unsigned i = 0; i < length; i++) {
+                auto elementValue = jsToInspectorValue(scriptState, array.getIndex(&scriptState, i), maxDepth);
+                if (!elementValue)
+                    return nullptr;
+                inspectorArray->pushValue(WTFMove(elementValue));
+            }
+            return WTFMove(inspectorArray);
+        }
+        auto inspectorObject = InspectorObject::create();
+        auto& object = *value.getObject();
+        PropertyNameArray propertyNames(&scriptState, PropertyNameMode::Strings);
+        object.methodTable()->getOwnPropertyNames(&object, &scriptState, propertyNames, EnumerationMode());
+        for (auto& name : propertyNames) {
+            auto inspectorValue = jsToInspectorValue(scriptState, object.get(&scriptState, name), maxDepth);
+            if (!inspectorValue)
+                return nullptr;
+            inspectorObject->setValue(name.string(), WTFMove(inspectorValue));
+        }
+        return WTFMove(inspectorObject);
+    }
+
+    ASSERT_NOT_REACHED();
+    return nullptr;
+}
+
+RefPtr<InspectorValue> toInspectorValue(ExecState& state, JSValue value)
+{
+    // FIXME: Maybe we should move the JSLockHolder stuff to the callers since this function takes a JSValue directly.
+    // Doing the locking here made sense when we were trying to abstract the difference between multiple JavaScript engines.
+    JSLockHolder holder(&state);
+    return jsToInspectorValue(state, value, InspectorValue::maxDepth);
+}
+
+} // namespace Inspector
+
+namespace Deprecated {
+
+ScriptValue::~ScriptValue()
+{
+}
+
+bool ScriptValue::getString(ExecState* scriptState, String& result) const
+{
+    if (!m_value)
+        return false;
+    JSLockHolder lock(scriptState);
+    if (!m_value.get().getString(scriptState, result))
+        return false;
+    return true;
+}
+
+String ScriptValue::toString(ExecState* scriptState) const
+{
+    VM& vm = scriptState->vm();
+    auto scope = DECLARE_CATCH_SCOPE(vm);
+
+    String result = m_value.get().toWTFString(scriptState);
+    // Handle the case where an exception is thrown as part of invoking toString on the object.
+    if (UNLIKELY(scope.exception()))
+        scope.clearException();
+    return result;
+}
+
+bool ScriptValue::isEqual(ExecState* scriptState, const ScriptValue& anotherValue) const
+{
+    if (hasNoValue())
+        return anotherValue.hasNoValue();
+    return JSValueIsStrictEqual(toRef(scriptState), toRef(scriptState, jsValue()), toRef(scriptState, anotherValue.jsValue()));
+}
+
+bool ScriptValue::isNull() const
+{
+    if (!m_value)
+        return false;
+    return m_value.get().isNull();
+}
+
+bool ScriptValue::isUndefined() const
+{
+    if (!m_value)
+        return false;
+    return m_value.get().isUndefined();
+}
+
+bool ScriptValue::isObject() const
+{
+    if (!m_value)
+        return false;
+    return m_value.get().isObject();
+}
+
+bool ScriptValue::isFunction() const
+{
+    CallData callData;
+    return getCallData(m_value.get(), callData) != CallType::None;
+}
+
+RefPtr<InspectorValue> ScriptValue::toInspectorValue(ExecState* scriptState) const
+{
+    JSLockHolder holder(scriptState);
+    return jsToInspectorValue(*scriptState, m_value.get(), InspectorValue::maxDepth);
+}
+
+} // namespace Deprecated
diff --git a/bindings/ScriptValue.h b/bindings/ScriptValue.h
new file mode 100644
index 0000000..7eb5072
--- /dev/null
+++ b/bindings/ScriptValue.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2008, 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "JSCJSValueInlines.h"
+#include "Operations.h"
+#include "Strong.h"
+#include "StrongInlines.h"
+#include 
+
+namespace Inspector {
+
+class InspectorValue;
+
+JS_EXPORT_PRIVATE RefPtr<InspectorValue> toInspectorValue(JSC::ExecState&, JSC::JSValue);
+
+}
+
+namespace Deprecated {
+
+class JS_EXPORT_PRIVATE ScriptValue {
+public:
+    ScriptValue() { }
+    ScriptValue(JSC::VM& vm, JSC::JSValue value) : m_value(vm, value) { }
+    virtual ~ScriptValue();
+
+    operator JSC::JSValue() const { return jsValue(); }
+    JSC::JSValue jsValue() const { return m_value.get(); }
+    bool getString(JSC::ExecState*, String& result) const;
+    String toString(JSC::ExecState*) const;
+    bool isEqual(JSC::ExecState*, const ScriptValue&) const;
+    bool isNull() const;
+    bool isUndefined() const;
+    bool isObject() const;
+    bool isFunction() const;
+    bool hasNoValue() const { return !m_value; }
+
+    void clear() { m_value.clear(); }
+
+    bool operator==(const ScriptValue& other) const { return m_value == other.m_value; }
+
+    RefPtr<Inspector::InspectorValue> toInspectorValue(JSC::ExecState*) const;
+
+private:
+    JSC::Strong<JSC::Unknown> m_value;
+};
+
+} // namespace Deprecated
diff --git a/builtins/ArrayConstructor.js b/builtins/ArrayConstructor.js
new file mode 100644
index 0000000..73add1a
--- /dev/null
+++ b/builtins/ArrayConstructor.js
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function of(/* items... */)
+{
+    "use strict";
+
+    var length = arguments.length;
+    var array = @isConstructor(this) ? new this(length) : @newArrayWithSize(length);
+    for (var k = 0; k < length; ++k)
+        @putByValDirect(array, k, arguments[k]);
+    array.length = length;
+    return array;
+}
+
+function from(items /*, mapFn, thisArg */)
+{
+    "use strict";
+
+    var thisObj = this;
+
+    var mapFn = @argument(1);
+
+    var thisArg;
+
+    if (mapFn !== @undefined) {
+        if (typeof mapFn !== "function")
+            @throwTypeError("Array.from requires that the second argument, when provided, be a function");
+
+        thisArg = @argument(2);
+    }
+
+    if (items == null)
+        @throwTypeError("Array.from requires an array-like object - not null or undefined");
+
+    var iteratorMethod = items.@iteratorSymbol;
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            @throwTypeError("Array.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function");
+
+        var result = @isConstructor(thisObj) ? new thisObj() : [];
+
+        var k = 0;
+        var iterator = iteratorMethod.@call(items);
+
+        // Since for-of loop once more looks up the @@iterator property of a given iterable,
+        // it could be observable if the user defines a getter for @@iterator.
+        // To avoid this situation, we define a wrapper object that @@iterator just returns a given iterator.
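+        // For example, if items[Symbol.iterator] were an accessor, the second lookup performed by
+        // the for-of below could hand back a different iterator than the one fetched above; iterating
+        // the wrapper guarantees the already-obtained iterator is the one that gets consumed.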
+        var wrapper = {};
+        wrapper.@iteratorSymbol = function() { return iterator; };
+
+        for (var value of wrapper) {
+            if (mapFn)
+                @putByValDirect(result, k, thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(result, k, value);
+            k += 1;
+        }
+
+        result.length = k;
+        return result;
+    }
+
+    var arrayLike = @Object(items);
+    var arrayLikeLength = @toLength(arrayLike.length);
+
+    var result = @isConstructor(thisObj) ? new thisObj(arrayLikeLength) : @newArrayWithSize(arrayLikeLength);
+
+    var k = 0;
+    while (k < arrayLikeLength) {
+        var value = arrayLike[k];
+        if (mapFn)
+            @putByValDirect(result, k, thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+        else
+            @putByValDirect(result, k, value);
+        k += 1;
+    }
+
+    result.length = arrayLikeLength;
+    return result;
+}
+
+function isArray(array)
+{
+    "use strict";
+
+    if (@isJSArray(array) || @isDerivedArray(array))
+        return true;
+    if (!@isProxyObject(array))
+        return false;
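+    // Only Proxy objects fall through to the slow path, which follows the proxy target chain
+    // as the spec's IsArray operation requires (so Array.isArray(new Proxy([], {})) is true).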
+    return @isArraySlow(array);
+}
diff --git a/builtins/ArrayIteratorPrototype.js b/builtins/ArrayIteratorPrototype.js
new file mode 100644
index 0000000..92640f7
--- /dev/null
+++ b/builtins/ArrayIteratorPrototype.js
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function next()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("%ArrayIteratorPrototype%.next requires that |this| not be null or undefined");
+
+    let next = this.@arrayIteratorNext;
+    if (next === @undefined)
+        @throwTypeError("%ArrayIteratorPrototype%.next requires that |this| be an Array Iterator instance");
+
+    return next.@call(this);
+}
+
+@globalPrivate
+function arrayIteratorValueNext()
+{
+    "use strict";
+    var done = true;
+    var value;
+
+    var array = this.@iteratedObject;
+    if (!this.@arrayIteratorIsDone) {
+        var index = this.@arrayIteratorNextIndex;
+        var length = array.length >>> 0;
+        if (index >= length) {
+            this.@arrayIteratorIsDone = true;
+        } else {
+            this.@arrayIteratorNextIndex = index + 1;
+            done = false;
+            value = array[index];
+        }
+    }
+
+    return { done, value };
+}
+
+@globalPrivate
+function arrayIteratorKeyNext()
+{
+    "use strict";
+    var done = true;
+    var value;
+
+    var array = this.@iteratedObject;
+    if (!this.@arrayIteratorIsDone) {
+        var index = this.@arrayIteratorNextIndex;
+        var length = array.length >>> 0;
+        if (index >= length) {
+            this.@arrayIteratorIsDone = true;
+        } else {
+            this.@arrayIteratorNextIndex = index + 1;
+            done = false;
+            value = index;
+        }
+    }
+
+    return { done, value };
+}
+
+@globalPrivate
+function arrayIteratorKeyValueNext()
+{
+    "use strict";
+    var done = true;
+    var value;
+
+    var array = this.@iteratedObject;
+    if (!this.@arrayIteratorIsDone) {
+        var index = this.@arrayIteratorNextIndex;
+        var length = array.length >>> 0;
+        if (index >= length) {
+            this.@arrayIteratorIsDone = true;
+        } else {
+            this.@arrayIteratorNextIndex = index + 1;
+            done = false;
+            value = [ index, array[index] ];
+        }
+    }
+
+    return { done, value };
+}
diff --git a/builtins/ArrayPrototype.js b/builtins/ArrayPrototype.js
new file mode 100644
index 0000000..55090c6
--- /dev/null
+++ b/builtins/ArrayPrototype.js
@@ -0,0 +1,782 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@constructor
+@globalPrivate
+function createArrayIterator(iteratedObject, kind, iterationFunction)
+{
+    this.@iteratedObject = iteratedObject;
+    this.@arrayIteratorKind = kind;
+    this.@arrayIteratorNextIndex = 0;
+    this.@arrayIteratorNext = iterationFunction;
+    this.@arrayIteratorIsDone = false;
+}
+
+function values()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.values requires that |this| not be null or undefined");
+
+    return new @createArrayIterator(@Object(this), "value", @arrayIteratorValueNext);
+}
+
+function keys()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.keys requires that |this| not be null or undefined");
+
+    return new @createArrayIterator(@Object(this), "key", @arrayIteratorKeyNext);
+}
+
+function entries()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.entries requires that |this| not be null or undefined");
+
+    return new @createArrayIterator(@Object(this), "key+value", @arrayIteratorKeyValueNext);
+}
+
+function reduce(callback /*, initialValue */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.reduce requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.reduce callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("reduce of empty array with no initial value");
+
+    var accumulator, k = 0;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else {
+        while (k < length && !(k in array))
+            k += 1;
+        if (k >= length)
+            @throwTypeError("reduce of empty array with no initial value");
+        accumulator = array[k++];
+    }
+
+    while (k < length) {
+        if (k in array)
+            accumulator = callback.@call(@undefined, accumulator, array[k], k, array);
+        k += 1;
+    }
+    return accumulator;
+}
+
+function reduceRight(callback /*, initialValue */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.reduceRight requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.reduceRight callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("reduceRight of empty array with no initial value");
+
+    var accumulator, k = length - 1;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else {
+        while (k >= 0 && !(k in array))
+            k -= 1;
+        if (k < 0)
+            @throwTypeError("reduceRight of empty array with no initial value");
+        accumulator = array[k--];
+    }
+
+    while (k >= 0) {
+        if (k in array)
+            accumulator = callback.@call(@undefined, accumulator, array[k], k, array);
+        k -= 1;
+    }
+    return accumulator;
+}
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.every requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.every callback must be a function");
+    
+    var thisArg = @argument(1);
+    
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (!callback.@call(thisArg, array[i], i, array))
+            return false;
+    }
+    
+    return true;
+}
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.forEach requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.forEach callback must be a function");
+    
+    var thisArg = @argument(1);
+    
+    for (var i = 0; i < length; i++) {
+        if (i in array)
+            callback.@call(thisArg, array[i], i, array);
+    }
+}
+
+function filter(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.filter requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.filter callback must be a function");
+    
+    var thisArg = @argument(1);
+
+    // Do 9.4.2.3 ArraySpeciesCreate
+    var result;
+    var constructor;
+    if (@isArray(array)) {
+        constructor = array.constructor;
+        // We have this check so that if some array from a different global object
+        // calls this filter they don't get an array with the Array.prototype of the
+        // other global object.
+        if (@isArrayConstructor(constructor) && @Array !== constructor)
+            constructor = @undefined;
+        if (@isObject(constructor)) {
+            constructor = constructor.@speciesSymbol;
+            if (constructor === null)
+                constructor = @undefined;
+        }
+    }
+    if (constructor === @Array || constructor === @undefined)
+        result = @newArrayWithSize(0);
+    else
+        result = new constructor(0);
+
+    var nextIndex = 0;
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        var current = array[i];
+        if (callback.@call(thisArg, current, i, array)) {
+            @putByValDirect(result, nextIndex, current);
+            ++nextIndex;
+        }
+    }
+    return result;
+}
+
+function map(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.map requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.map callback must be a function");
+    
+    var thisArg = @argument(1);
+
+    // Do 9.4.2.3 ArraySpeciesCreate
+    var result;
+    var constructor;
+    if (@isArray(array)) {
+        constructor = array.constructor;
+        // We have this check so that if some array from a different global object
+        // calls this map they don't get an array with the Array.prototype of the
+        // other global object.
+        if (@isArrayConstructor(constructor) && @Array !== constructor)
+            constructor = @undefined;
+        if (@isObject(constructor)) {
+            constructor = constructor.@speciesSymbol;
+            if (constructor === null)
+                constructor = @undefined;
+        }
+    }
+    if (constructor === @Array || constructor === @undefined)
+        result = @newArrayWithSize(length);
+    else
+        result = new constructor(length);
+
+    var nextIndex = 0;
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        var mappedValue = callback.@call(thisArg, array[i], i, array);
+        @putByValDirect(result, i, mappedValue);
+    }
+    return result;
+}
+
+function some(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.some requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.some callback must be a function");
+    
+    var thisArg = @argument(1);
+    for (var i = 0; i < length; i++) {
+        if (!(i in array))
+            continue;
+        if (callback.@call(thisArg, array[i], i, array))
+            return true;
+    }
+    return false;
+}
+
+function fill(value /* [, start [, end]] */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.fill requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    var relativeStart = @toInteger(@argument(1));
+    var k = 0;
+    if (relativeStart < 0) {
+        k = length + relativeStart;
+        if (k < 0)
+            k = 0;
+    } else {
+        k = relativeStart;
+        if (k > length)
+            k = length;
+    }
+    var relativeEnd = length;
+    var end = @argument(2);
+    if (end !== @undefined)
+        relativeEnd = @toInteger(end);
+    var final = 0;
+    if (relativeEnd < 0) {
+        final = length + relativeEnd;
+        if (final < 0)
+            final = 0;
+    } else {
+        final = relativeEnd;
+        if (final > length)
+            final = length;
+    }
+    for (; k < final; k++)
+        array[k] = value;
+    return array;
+}
+
+function find(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.find requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.find callback must be a function");
+    
+    var thisArg = @argument(1);
+    for (var i = 0; i < length; i++) {
+        var kValue = array[i];
+        if (callback.@call(thisArg, kValue, i, array))
+            return kValue;
+    }
+    return @undefined;
+}
+
+function findIndex(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.findIndex requires that |this| not be null or undefined");
+    
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (typeof callback !== "function")
+        @throwTypeError("Array.prototype.findIndex callback must be a function");
+    
+    var thisArg = @argument(1);
+    for (var i = 0; i < length; i++) {
+        if (callback.@call(thisArg, array[i], i, array))
+            return i;
+    }
+    return -1;
+}
+
+function includes(searchElement /*, fromIndex*/)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.includes requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    if (length === 0)
+        return false;
+
+    var fromIndex = 0;
+    var from = @argument(1);
+    if (from !== @undefined)
+        fromIndex = @toInteger(from);
+
+    var index;
+    if (fromIndex >= 0)
+        index = fromIndex;
+    else
+        index = length + fromIndex;
+
+    if (index < 0)
+        index = 0;
+
+    var currentElement;
+    for (; index < length; ++index) {
+        currentElement = array[index];
+        // Use SameValueZero comparison, rather than just StrictEquals.
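+        // For example, [NaN].includes(NaN) is true even though NaN !== NaN, whereas
+        // indexOf, which uses strict equality, would report -1 for the same search.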
+        if (searchElement === currentElement || (searchElement !== searchElement && currentElement !== currentElement))
+            return true;
+    }
+    return false;
+}
+
+function sort(comparator)
+{
+    "use strict";
+
+    function min(a, b)
+    {
+        return a < b ? a : b;
+    }
+
+    function stringComparator(a, b)
+    {
+        var aString = a.string;
+        var bString = b.string;
+
+        var aLength = aString.length;
+        var bLength = bString.length;
+        var length = min(aLength, bLength);
+
+        for (var i = 0; i < length; ++i) {
+            var aCharCode = aString.@charCodeAt(i);
+            var bCharCode = bString.@charCodeAt(i);
+
+            if (aCharCode == bCharCode)
+                continue;
+
+            return aCharCode - bCharCode;
+        }
+
+        return aLength - bLength;
+    }
+
+    // Move undefineds and holes to the end of a sparse array. Result is [values..., undefineds..., holes...].
+    function compactSparse(array, dst, src, length)
+    {
+        var values = [ ];
+        var seen = { };
+        var valueCount = 0;
+        var undefinedCount = 0;
+
+        // Clean up after the in-progress non-sparse compaction that failed.
+        for (var i = dst; i < src; ++i)
+            delete array[i];
+
+        for (var object = array; object; object = @Object.@getPrototypeOf(object)) {
+            var propertyNames = @Object.@getOwnPropertyNames(object);
+            for (var i = 0; i < propertyNames.length; ++i) {
+                var index = propertyNames[i];
+                if (index < length) { // Exclude non-numeric properties and properties past length.
+                    if (seen[index]) // Exclude duplicates.
+                        continue;
+                    seen[index] = 1;
+
+                    var value = array[index];
+                    delete array[index];
+
+                    if (value === @undefined) {
+                        ++undefinedCount;
+                        continue;
+                    }
+
+                    array[valueCount++] = value;
+                }
+            }
+        }
+
+        for (var i = valueCount; i < valueCount + undefinedCount; ++i)
+            array[i] = @undefined;
+
+        return valueCount;
+    }
+
+    function compactSlow(array, length)
+    {
+        var holeCount = 0;
+
+        for (var dst = 0, src = 0; src < length; ++src) {
+            if (!(src in array)) {
+                ++holeCount;
+                if (holeCount < 256)
+                    continue;
+                return compactSparse(array, dst, src, length);
+            }
+
+            var value = array[src];
+            if (value === @undefined)
+                continue;
+
+            array[dst++] = value;
+        }
+
+        var valueCount = dst;
+        var undefinedCount = length - valueCount - holeCount;
+
+        for (var i = valueCount; i < valueCount + undefinedCount; ++i)
+            array[i] = @undefined;
+
+        for (var i = valueCount + undefinedCount; i < length; ++i)
+            delete array[i];
+
+        return valueCount;
+    }
+
+    // Move undefineds and holes to the end of an array. Result is [values..., undefineds..., holes...].
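+    // For example, [1, undefined, <hole>, 2] compacts to [1, 2, undefined, <hole>] and the
+    // returned valueCount of 2 limits the subsequent sort to the leading defined values.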
+    function compact(array, length)
+    {
+        for (var i = 0; i < array.length; ++i) {
+            if (array[i] === @undefined)
+                return compactSlow(array, length);
+        }
+
+        return length;
+    }
+
+    function merge(dst, src, srcIndex, srcEnd, width, comparator)
+    {
+        var left = srcIndex;
+        var leftEnd = min(left + width, srcEnd);
+        var right = leftEnd;
+        var rightEnd = min(right + width, srcEnd);
+
+        for (var dstIndex = left; dstIndex < rightEnd; ++dstIndex) {
+            if (right < rightEnd) {
+                if (left >= leftEnd || comparator(src[right], src[left]) < 0) {
+                    dst[dstIndex] = src[right++];
+                    continue;
+                }
+            }
+
+            dst[dstIndex] = src[left++];
+        }
+    }
+
+    function mergeSort(array, valueCount, comparator)
+    {
+        var buffer = [ ];
+        buffer.length = valueCount;
+
+        var dst = buffer;
+        var src = array;
+        for (var width = 1; width < valueCount; width *= 2) {
+            for (var srcIndex = 0; srcIndex < valueCount; srcIndex += 2 * width)
+                merge(dst, src, srcIndex, valueCount, width, comparator);
+
+            var tmp = src;
+            src = dst;
+            dst = tmp;
+        }
+
+        if (src != array) {
+            for (var i = 0; i < valueCount; i++)
+                array[i] = src[i];
+        }
+    }
+
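+    // bucketSort is effectively a most-significant-character radix sort: entries are bucketed by
+    // the character code at `depth`, each bucket recurses one character deeper, and small or very
+    // deep buckets fall back to the comparator-based mergeSort above using stringComparator.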
+    function bucketSort(array, dst, bucket, depth)
+    {
+        if (bucket.length < 32 || depth > 32) {
+            mergeSort(bucket, bucket.length, stringComparator);
+            for (var i = 0; i < bucket.length; ++i)
+                array[dst++] = bucket[i].value;
+            return dst;
+        }
+
+        var buckets = [ ];
+        for (var i = 0; i < bucket.length; ++i) {
+            var entry = bucket[i];
+            var string = entry.string;
+            if (string.length == depth) {
+                array[dst++] = entry.value;
+                continue;
+            }
+
+            var c = string.@charCodeAt(depth);
+            if (!buckets[c])
+                buckets[c] = [ ];
+            buckets[c][buckets[c].length] = entry;
+        }
+
+        for (var i = 0; i < buckets.length; ++i) {
+            if (!buckets[i])
+                continue;
+            dst = bucketSort(array, dst, buckets[i], depth + 1);
+        }
+
+        return dst;
+    }
+
+    function comparatorSort(array, length, comparator)
+    {
+        var valueCount = compact(array, length);
+        mergeSort(array, valueCount, comparator);
+    }
+
+    function stringSort(array, length)
+    {
+        var valueCount = compact(array, length);
+
+        var strings = @newArrayWithSize(valueCount);
+        for (var i = 0; i < valueCount; ++i)
+            strings[i] = { string: @toString(array[i]), value: array[i] };
+
+        bucketSort(array, 0, strings, 0);
+    }
+
+    if (this == null)
+        @throwTypeError("Array.prototype.sort requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+
+    var length = array.length >>> 0;
+
+    // For compatibility with Firefox and Chrome, do nothing observable
+    // to the target array if it has 0 or 1 sortable properties.
+    if (length < 2)
+        return array;
+
+    if (typeof comparator == "function")
+        comparatorSort(array, length, comparator);
+    else if (comparator === @undefined)
+        stringSort(array, length);
+    else
+        @throwTypeError("Array.prototype.sort requires the comparsion function be a function or undefined");
+
+    return array;
+}
+
+function concatSlowPath()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("Array.prototype.concat requires that |this| not be null or undefined");
+
+    var currentElement = @Object(this);
+
+    var constructor;
+    if (@isArray(currentElement)) {
+        constructor = currentElement.constructor;
+        // We have this check so that if some array from a different global object
+        // calls this concat they don't get an array with the Array.prototype of the
+        // other global object.
+        if (@isArrayConstructor(constructor) && @Array !== constructor)
+            constructor = @undefined;
+        else if (@isObject(constructor)) {
+            constructor = constructor.@speciesSymbol;
+            if (constructor === null)
+                constructor = @Array;
+        }
+    }
+
+    var argCount = arguments.length;
+    var result;
+    if (constructor === @Array || constructor === @undefined)
+        result = @newArrayWithSize(0);
+    else
+        result = new constructor(0);
+    var resultIsArray = @isJSArray(result);
+
+    var resultIndex = 0;
+    var argIndex = 0;
+
+    do {
+        let spreadable = @isObject(currentElement) && currentElement.@isConcatSpreadableSymbol;
+        if ((spreadable === @undefined && @isArray(currentElement)) || spreadable) {
+            let length = @toLength(currentElement.length);
+            if (length + resultIndex > @MAX_ARRAY_INDEX)
+                @throwRangeError("Length exceeded the maximum array length");
+            if (resultIsArray && @isJSArray(currentElement)) {
+                @appendMemcpy(result, currentElement, resultIndex);
+                resultIndex += length;
+            } else {
+                for (var i = 0; i < length; i++) {
+                    if (i in currentElement)
+                        @putByValDirect(result, resultIndex, currentElement[i]);
+                    resultIndex++;
+                }
+            }
+        } else {
+            if (resultIndex >= @MAX_ARRAY_INDEX)
+                @throwRangeError("Length exceeded the maximum array length");
+            @putByValDirect(result, resultIndex++, currentElement);
+        }
+        currentElement = arguments[argIndex];
+    } while (argIndex++ < argCount);
+
+    result.length = resultIndex;
+    return result;
+}
+
+function concat(first)
+{
+    "use strict";
+
+    if (@argumentCount() === 1
+        && @isJSArray(this)
+        && this.@isConcatSpreadableSymbol === @undefined
+        && (!@isObject(first) || first.@isConcatSpreadableSymbol === @undefined)) {
+
+        let result = @concatMemcpy(this, first);
+        if (result !== null)
+            return result;
+    }
+
+    return @tailCallForwardArguments(@concatSlowPath, this);
+}
+
+function copyWithin(target, start /*, end */)
+{
+    "use strict";
+
+    function maxWithPositives(a, b)
+    {
+        return (a < b) ? b : a;
+    }
+
+    function minWithMaybeNegativeZeroAndPositive(maybeNegativeZero, positive)
+    {
+        return (maybeNegativeZero < positive) ? maybeNegativeZero : positive;
+    }
+
+    if (this == null)
+        @throwTypeError("Array.copyWithin requires that |this| not be null or undefined");
+
+    var array = @Object(this);
+    var length = @toLength(array.length);
+
+    var relativeTarget = @toInteger(target);
+    var to = (relativeTarget < 0) ? maxWithPositives(length + relativeTarget, 0) : minWithMaybeNegativeZeroAndPositive(relativeTarget, length);
+
+    var relativeStart = @toInteger(start);
+    var from = (relativeStart < 0) ? maxWithPositives(length + relativeStart, 0) : minWithMaybeNegativeZeroAndPositive(relativeStart, length);
+
+    var relativeEnd;
+    var end = @argument(2);
+    if (end === @undefined)
+        relativeEnd = length;
+    else
+        relativeEnd = @toInteger(end);
+
+    var finalValue = (relativeEnd < 0) ? maxWithPositives(length + relativeEnd, 0) : minWithMaybeNegativeZeroAndPositive(relativeEnd, length);
+
+    var count = minWithMaybeNegativeZeroAndPositive(finalValue - from, length - to);
+
+    var direction = 1;
+    if (from < to && to < from + count) {
+        direction = -1;
+        from = from + count - 1;
+        to = to + count - 1;
+    }
+
+    for (var i = 0; i < count; ++i, from += direction, to += direction) {
+        if (from in array)
+            array[to] = array[from];
+        else
+            delete array[to];
+    }
+
+    return array;
+}
diff --git a/builtins/AsyncFunctionPrototype.js b/builtins/AsyncFunctionPrototype.js
new file mode 100644
index 0000000..88cfb01
--- /dev/null
+++ b/builtins/AsyncFunctionPrototype.js
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Caitlin Potter .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@globalPrivate
+function asyncFunctionResume(generator, promiseCapability, sentValue, resumeMode)
+{
+    "use strict";
+    let state = generator.@generatorState;
+    let value = @undefined;
+
+    if (state === @GeneratorStateCompleted || (resumeMode !== @GeneratorResumeModeNormal && resumeMode !== @GeneratorResumeModeThrow))
+        @throwTypeError("Async function illegally resumed");
+
+    try {
+        generator.@generatorState = @GeneratorStateExecuting;
+        value = generator.@generatorNext.@call(generator.@generatorThis, generator, state, sentValue, resumeMode, generator.@generatorFrame);
+        if (generator.@generatorState === @GeneratorStateExecuting) {
+            generator.@generatorState = @GeneratorStateCompleted;
+            promiseCapability.@resolve(value);
+            return promiseCapability.@promise;
+        }
+    } catch (error) {
+        generator.@generatorState = @GeneratorStateCompleted;
+        promiseCapability.@reject(error);
+        return promiseCapability.@promise;
+    }
+
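+    // The awaited value is re-wrapped in a fresh promise so that plain values and thenables are
+    // handled uniformly; when it settles, the generator is resumed in either normal or throw
+    // mode, which drives the next step of the async function body.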
+    let wrappedValue = @newPromiseCapability(@Promise);
+    wrappedValue.@resolve.@call(@undefined, value);
+
+    wrappedValue.@promise.@then(
+        function(value) { @asyncFunctionResume(generator, promiseCapability, value, @GeneratorResumeModeNormal); },
+        function(error) { @asyncFunctionResume(generator, promiseCapability, error, @GeneratorResumeModeThrow); });
+
+    return promiseCapability.@promise;
+}
diff --git a/builtins/BuiltinExecutableCreator.cpp b/builtins/BuiltinExecutableCreator.cpp
new file mode 100644
index 0000000..2b79e7e
--- /dev/null
+++ b/builtins/BuiltinExecutableCreator.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BuiltinExecutableCreator.h"
+
+#include "BuiltinExecutables.h"
+
+namespace JSC {
+
+UnlinkedFunctionExecutable* createBuiltinExecutable(VM& vm, const SourceCode& source, const Identifier& ident, ConstructorKind kind, ConstructAbility ability)
+{
+    return BuiltinExecutables::createExecutable(vm, source, ident, kind, ability);
+}
+    
+} // namespace JSC
diff --git a/builtins/BuiltinExecutableCreator.h b/builtins/BuiltinExecutableCreator.h
new file mode 100644
index 0000000..19c0884
--- /dev/null
+++ b/builtins/BuiltinExecutableCreator.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConstructAbility.h"
+#include "ParserModes.h"
+#include "SourceCode.h"
+
+namespace JSC {
+
+JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ConstructorKind, ConstructAbility);
+
+} // namespace JSC
diff --git a/builtins/BuiltinExecutables.cpp b/builtins/BuiltinExecutables.cpp
new file mode 100644
index 0000000..c741e5b
--- /dev/null
+++ b/builtins/BuiltinExecutables.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "config.h"
+#include "BuiltinExecutables.h"
+
+#include "BuiltinNames.h"
+#include "JSCInlines.h"
+#include "Parser.h"
+#include 
+
+namespace JSC {
+
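+// JSC_FOREACH_BUILTIN_CODE is an X-macro: it invokes the supplied macro once per generated
+// builtin, so the constructor below expands to one m_<name>Source member initializer per builtin.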
+BuiltinExecutables::BuiltinExecutables(VM& vm)
+    : m_vm(vm)
+#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(makeSource(StringImpl::createFromLiteral(s_##name, length)))
+    JSC_FOREACH_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
+#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
+{
+}
+
+UnlinkedFunctionExecutable* BuiltinExecutables::createDefaultConstructor(ConstructorKind constructorKind, const Identifier& name)
+{
+    static NeverDestroyed<const String> baseConstructorCode(ASCIILiteral("(function () { })"));
+    static NeverDestroyed<const String> derivedConstructorCode(ASCIILiteral("(function (...args) { super(...args); })"));
+
+    switch (constructorKind) {
+    case ConstructorKind::None:
+        break;
+    case ConstructorKind::Base:
+        return createExecutable(m_vm, makeSource(baseConstructorCode), name, constructorKind, ConstructAbility::CanConstruct);
+    case ConstructorKind::Extends:
+        return createExecutable(m_vm, makeSource(derivedConstructorCode), name, constructorKind, ConstructAbility::CanConstruct);
+    }
+    ASSERT_NOT_REACHED();
+    return nullptr;
+}
+
+UnlinkedFunctionExecutable* BuiltinExecutables::createBuiltinExecutable(const SourceCode& code, const Identifier& name, ConstructAbility constructAbility)
+{
+    return createExecutable(m_vm, code, name, ConstructorKind::None, constructAbility);
+}
+
+UnlinkedFunctionExecutable* createBuiltinExecutable(VM& vm, const SourceCode& code, const Identifier& name, ConstructAbility constructAbility)
+{
+    return BuiltinExecutables::createExecutable(vm, code, name, ConstructorKind::None, constructAbility);
+}
+
+UnlinkedFunctionExecutable* BuiltinExecutables::createExecutable(VM& vm, const SourceCode& source, const Identifier& name, ConstructorKind constructorKind, ConstructAbility constructAbility)
+{
+    JSTextPosition positionBeforeLastNewline;
+    ParserError error;
+    bool isParsingDefaultConstructor = constructorKind != ConstructorKind::None;
+    JSParserBuiltinMode builtinMode = isParsingDefaultConstructor ? JSParserBuiltinMode::NotBuiltin : JSParserBuiltinMode::Builtin;
+    UnlinkedFunctionKind kind = isParsingDefaultConstructor ? UnlinkedNormalFunction : UnlinkedBuiltinFunction;
+    SourceCode parentSourceOverride = isParsingDefaultConstructor ? source : SourceCode();
+    std::unique_ptr<ProgramNode> program = parse<ProgramNode>(
+        &vm, source, Identifier(), builtinMode,
+        JSParserStrictMode::NotStrict, JSParserScriptMode::Classic, SourceParseMode::ProgramMode, SuperBinding::NotNeeded, error,
+        &positionBeforeLastNewline, constructorKind);
+
+    if (!program) {
+        dataLog("Fatal error compiling builtin function '", name.string(), "': ", error.message());
+        CRASH();
+    }
+
+    StatementNode* exprStatement = program->singleStatement();
+    RELEASE_ASSERT(exprStatement);
+    RELEASE_ASSERT(exprStatement->isExprStatement());
+    ExpressionNode* funcExpr = static_cast<ExprStatementNode*>(exprStatement)->expr();
+    RELEASE_ASSERT(funcExpr);
+    RELEASE_ASSERT(funcExpr->isFuncExprNode());
+    FunctionMetadataNode* metadata = static_cast<FuncExprNode*>(funcExpr)->metadata();
+    RELEASE_ASSERT(!program->hasCapturedVariables());
+    
+    metadata->setEndPosition(positionBeforeLastNewline);
+    RELEASE_ASSERT(metadata);
+    RELEASE_ASSERT(metadata->ident().isNull());
+    
+    // This function assumes an input string that would result in a single anonymous function expression.
+    metadata->setEndPosition(positionBeforeLastNewline);
+    RELEASE_ASSERT(metadata);
+    metadata->overrideName(name);
+    VariableEnvironment dummyTDZVariables;
+    UnlinkedFunctionExecutable* functionExecutable = UnlinkedFunctionExecutable::create(&vm, source, metadata, kind, constructAbility, JSParserScriptMode::Classic, dummyTDZVariables, DerivedContextType::None, WTFMove(parentSourceOverride));
+    return functionExecutable;
+}
+
+void BuiltinExecutables::finalize(Handle<Unknown>, void* context)
+{
+    static_cast<Weak<UnlinkedFunctionExecutable>*>(context)->clear();
+}
+
+#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \
+UnlinkedFunctionExecutable* BuiltinExecutables::name##Executable() \
+{\
+    if (!m_##name##Executable)\
+        m_##name##Executable = Weak<UnlinkedFunctionExecutable>(createBuiltinExecutable(m_##name##Source, m_vm.propertyNames->builtinNames().functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\
+    return m_##name##Executable.get();\
+}
+JSC_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
+#undef DEFINE_BUILTIN_EXECUTABLES
+
+}
diff --git a/builtins/BuiltinExecutables.h b/builtins/BuiltinExecutables.h
new file mode 100644
index 0000000..ee0eaad
--- /dev/null
+++ b/builtins/BuiltinExecutables.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCBuiltins.h"
+#include "ParserModes.h"
+#include "SourceCode.h"
+#include "Weak.h"
+#include "WeakHandleOwner.h"
+
+namespace JSC {
+
+class UnlinkedFunctionExecutable;
+class Identifier;
+class VM;
+
+class BuiltinExecutables final: private WeakHandleOwner {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    explicit BuiltinExecutables(VM&);
+
+#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \
+UnlinkedFunctionExecutable* name##Executable(); \
+const SourceCode& name##Source() { return m_##name##Source; }
+    
+    JSC_FOREACH_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
+#undef EXPOSE_BUILTIN_EXECUTABLES
+
+    UnlinkedFunctionExecutable* createDefaultConstructor(ConstructorKind, const Identifier& name);
+
+    static UnlinkedFunctionExecutable* createExecutable(VM&, const SourceCode&, const Identifier&, ConstructorKind, ConstructAbility);
+private:
+    void finalize(Handle<Unknown>, void* context) override;
+
+    VM& m_vm;
+
+    UnlinkedFunctionExecutable* createBuiltinExecutable(const SourceCode&, const Identifier&, ConstructAbility);
+
+#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length)\
+    SourceCode m_##name##Source; \
+    Weak<UnlinkedFunctionExecutable> m_##name##Executable;
+    JSC_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
+#undef DECLARE_BUILTIN_SOURCE_MEMBERS
+};
+
+}
diff --git a/builtins/BuiltinNames.h b/builtins/BuiltinNames.h
new file mode 100644
index 0000000..f9045bf
--- /dev/null
+++ b/builtins/BuiltinNames.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BuiltinUtils.h"
+#include "BytecodeIntrinsicRegistry.h"
+#include "CommonIdentifiers.h"
+#include "JSCBuiltins.h"
+
+namespace JSC {
+
+#define JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(macro) \
+    JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \
+    macro(arrayIteratorNextIndex) \
+    macro(arrayIterationKind) \
+    macro(arrayIteratorNext) \
+    macro(arrayIteratorIsDone) \
+    macro(arrayIteratorKind) \
+    macro(charCodeAt) \
+    macro(isView) \
+    macro(iteratedObject) \
+    macro(iteratedString) \
+    macro(stringIteratorNextIndex) \
+    macro(promise) \
+    macro(fulfillmentHandler) \
+    macro(rejectionHandler) \
+    macro(index) \
+    macro(deferred) \
+    macro(countdownHolder) \
+    macro(Object) \
+    macro(ownEnumerablePropertyKeys) \
+    macro(Number) \
+    macro(Array) \
+    macro(ArrayBuffer) \
+    macro(String) \
+    macro(RegExp) \
+    macro(Map) \
+    macro(Promise) \
+    macro(Reflect) \
+    macro(InternalPromise) \
+    macro(abs) \
+    macro(floor) \
+    macro(trunc) \
+    macro(create) \
+    macro(defineProperty) \
+    macro(getPrototypeOf) \
+    macro(getOwnPropertyDescriptor) \
+    macro(getOwnPropertyNames) \
+    macro(ownKeys) \
+    macro(Error) \
+    macro(RangeError) \
+    macro(TypeError) \
+    macro(typedArrayLength) \
+    macro(typedArraySort) \
+    macro(typedArrayGetOriginalConstructor) \
+    macro(typedArraySubarrayCreate) \
+    macro(BuiltinLog) \
+    macro(homeObject) \
+    macro(getTemplateObject) \
+    macro(templateRegistryKey) \
+    macro(enqueueJob) \
+    macro(promiseState) \
+    macro(promiseReactions) \
+    macro(promiseResult) \
+    macro(onFulfilled) \
+    macro(onRejected) \
+    macro(push) \
+    macro(repeatCharacter) \
+    macro(capabilities) \
+    macro(starDefault) \
+    macro(InspectorInstrumentation) \
+    macro(get) \
+    macro(set) \
+    macro(shift) \
+    macro(allocateTypedArray) \
+    macro(Int8Array) \
+    macro(Int16Array) \
+    macro(Int32Array) \
+    macro(Uint8Array) \
+    macro(Uint8ClampedArray) \
+    macro(Uint16Array) \
+    macro(Uint32Array) \
+    macro(Float32Array) \
+    macro(Float64Array) \
+    macro(exec) \
+    macro(generator) \
+    macro(generatorNext) \
+    macro(generatorState) \
+    macro(generatorFrame) \
+    macro(generatorValue) \
+    macro(generatorThis) \
+    macro(generatorResumeMode) \
+    macro(Collator) \
+    macro(DateTimeFormat) \
+    macro(NumberFormat) \
+    macro(intlSubstituteValue) \
+    macro(thisTimeValue) \
+    macro(thisNumberValue) \
+    macro(newTargetLocal) \
+    macro(derivedConstructor) \
+    macro(isTypedArrayView) \
+    macro(isBoundFunction) \
+    macro(hasInstanceBoundFunction) \
+    macro(instanceOf) \
+    macro(isArraySlow) \
+    macro(isArrayConstructor) \
+    macro(isConstructor) \
+    macro(isDerivedConstructor) \
+    macro(concatMemcpy) \
+    macro(appendMemcpy) \
+    macro(predictFinalLengthFromArgumunts) \
+    macro(print) \
+    macro(regExpCreate) \
+    macro(SetIterator) \
+    macro(setIteratorNext) \
+    macro(replaceUsingRegExp) \
+    macro(replaceUsingStringSearch) \
+    macro(MapIterator) \
+    macro(mapIteratorNext) \
+    macro(regExpBuiltinExec) \
+    macro(regExpMatchFast) \
+    macro(regExpProtoFlagsGetter) \
+    macro(regExpProtoGlobalGetter) \
+    macro(regExpProtoIgnoreCaseGetter) \
+    macro(regExpProtoMultilineGetter) \
+    macro(regExpProtoSourceGetter) \
+    macro(regExpProtoStickyGetter) \
+    macro(regExpProtoUnicodeGetter) \
+    macro(regExpPrototypeSymbolReplace) \
+    macro(regExpReplaceFast) \
+    macro(regExpSearchFast) \
+    macro(regExpSplitFast) \
+    macro(regExpTestFast) \
+    macro(stringIncludesInternal) \
+    macro(stringSplitFast) \
+    macro(stringSubstrInternal) \
+    macro(makeBoundFunction) \
+    macro(hasOwnLengthProperty) \
+    macro(WebAssembly) \
+    macro(Module) \
+    macro(Instance) \
+    macro(Memory) \
+    macro(Table) \
+    macro(CompileError) \
+    macro(RuntimeError) \
+
+
+#define INITIALIZE_PRIVATE_TO_PUBLIC_ENTRY(name) m_privateToPublicMap.add(m_##name##PrivateName.impl(), &m_##name);
+#define INITIALIZE_PUBLIC_TO_PRIVATE_ENTRY(name) m_publicToPrivateMap.add(m_##name.impl(), &m_##name##PrivateName);
+
+// We commandeer the publicToPrivateMap to allow us to convert private symbol names into the appropriate symbol.
+// e.g. @iteratorSymbol points to Symbol.iterator in this map rather than to an actual private name.
+// FIXME: This is a weird hack and we shouldn't need to do this.
+#define INITIALIZE_SYMBOL_PUBLIC_TO_PRIVATE_ENTRY(name) m_publicToPrivateMap.add(m_##name##SymbolPrivateIdentifier.impl(), &m_##name##Symbol);
+
+class BuiltinNames {
+    WTF_MAKE_NONCOPYABLE(BuiltinNames); WTF_MAKE_FAST_ALLOCATED;
+    
+public:
+    // We treat the dollarVM name as a special case below for $vm (because CommonIdentifiers does not
+    // yet support the $ character).
+
+    BuiltinNames(VM* vm, CommonIdentifiers* commonIdentifiers)
+        : m_emptyIdentifier(commonIdentifiers->emptyIdentifier)
+        JSC_FOREACH_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(INITIALIZE_BUILTIN_NAMES)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(INITIALIZE_BUILTIN_SYMBOLS)
+        , m_dollarVMName(Identifier::fromString(vm, "$vm"))
+        , m_dollarVMPrivateName(Identifier::fromUid(PrivateName(PrivateName::Description, ASCIILiteral("PrivateSymbol.$vm"))))
+    {
+        JSC_FOREACH_BUILTIN_FUNCTION_NAME(INITIALIZE_PRIVATE_TO_PUBLIC_ENTRY)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(INITIALIZE_PRIVATE_TO_PUBLIC_ENTRY)
+        JSC_FOREACH_BUILTIN_FUNCTION_NAME(INITIALIZE_PUBLIC_TO_PRIVATE_ENTRY)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(INITIALIZE_PUBLIC_TO_PRIVATE_ENTRY)
+        JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(INITIALIZE_SYMBOL_PUBLIC_TO_PRIVATE_ENTRY)
+        m_privateToPublicMap.add(m_dollarVMPrivateName.impl(), &m_dollarVMName);
+        m_publicToPrivateMap.add(m_dollarVMName.impl(), &m_dollarVMPrivateName);
+    }
+
+    bool isPrivateName(SymbolImpl& uid) const;
+    bool isPrivateName(UniquedStringImpl& uid) const;
+    bool isPrivateName(const Identifier&) const;
+    const Identifier* lookUpPrivateName(const Identifier&) const;
+    const Identifier& lookUpPublicName(const Identifier&) const;
+    
+    void appendExternalName(const Identifier& publicName, const Identifier& privateName);
+
+    JSC_FOREACH_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(DECLARE_BUILTIN_SYMBOL_ACCESSOR)
+    const JSC::Identifier& dollarVMPublicName() const { return m_dollarVMName; }
+    const JSC::Identifier& dollarVMPrivateName() const { return m_dollarVMPrivateName; }
+
+private:
+    Identifier m_emptyIdentifier;
+    JSC_FOREACH_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_PROPERTY_NAME(DECLARE_BUILTIN_NAMES)
+    JSC_COMMON_PRIVATE_IDENTIFIERS_EACH_WELL_KNOWN_SYMBOL(DECLARE_BUILTIN_SYMBOLS)
+    const JSC::Identifier m_dollarVMName;
+    const JSC::Identifier m_dollarVMPrivateName;
+    typedef HashMap<RefPtr<UniquedStringImpl>, const Identifier*, IdentifierRepHash> BuiltinNamesMap;
+    BuiltinNamesMap m_publicToPrivateMap;
+    BuiltinNamesMap m_privateToPublicMap;
+};
+
+inline bool BuiltinNames::isPrivateName(SymbolImpl& uid) const
+{
+    return m_privateToPublicMap.contains(&uid);
+}
+
+inline bool BuiltinNames::isPrivateName(UniquedStringImpl& uid) const
+{
+    if (!uid.isSymbol())
+        return false;
+    return m_privateToPublicMap.contains(&uid);
+}
+
+inline bool BuiltinNames::isPrivateName(const Identifier& ident) const
+{
+    if (ident.isNull())
+        return false;
+    return isPrivateName(*ident.impl());
+}
+
+inline const Identifier* BuiltinNames::lookUpPrivateName(const Identifier& ident) const
+{
+    auto iter = m_publicToPrivateMap.find(ident.impl());
+    if (iter != m_publicToPrivateMap.end())
+        return iter->value;
+    return 0;
+}
+
+inline const Identifier& BuiltinNames::lookUpPublicName(const Identifier& ident) const
+{
+    auto iter = m_privateToPublicMap.find(ident.impl());
+    if (iter != m_privateToPublicMap.end())
+        return *iter->value;
+    return m_emptyIdentifier;
+}
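+
+// Illustrative sketch of how the two maps are used; "create" here is just one example of a
+// registered name, not special-cased in any way:
+//
+//     lookUpPrivateName(<Identifier "create">)               -> the private Identifier "PrivateSymbol.create"
+//                                                               (spelled @create inside builtin JS)
+//     lookUpPublicName(<Identifier "PrivateSymbol.create">)  -> the public Identifier "create"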
+
+inline void BuiltinNames::appendExternalName(const Identifier& publicName, const Identifier& privateName)
+{
+#ifndef NDEBUG
+    for (const auto& key : m_publicToPrivateMap.keys())
+        ASSERT(publicName.string() != *key);
+#endif
+
+    m_privateToPublicMap.add(privateName.impl(), &publicName);
+    m_publicToPrivateMap.add(publicName.impl(), &privateName);
+}
+
+} // namespace JSC
diff --git a/builtins/BuiltinUtils.h b/builtins/BuiltinUtils.h
new file mode 100644
index 0000000..26da291
--- /dev/null
+++ b/builtins/BuiltinUtils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Canon Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConstructAbility.h"
+
+namespace JSC {
+
+#define INITIALIZE_BUILTIN_NAMES(name) , m_##name(JSC::Identifier::fromString(vm, #name)), m_##name##PrivateName(JSC::Identifier::fromUid(JSC::PrivateName(JSC::PrivateName::Description, ASCIILiteral("PrivateSymbol." #name))))
+#define DECLARE_BUILTIN_NAMES(name) const JSC::Identifier m_##name; const JSC::Identifier m_##name##PrivateName;
+#define DECLARE_BUILTIN_IDENTIFIER_ACCESSOR(name) \
+    const JSC::Identifier& name##PublicName() const { return m_##name; } \
+    const JSC::Identifier& name##PrivateName() const { return m_##name##PrivateName; }
+
+#define INITIALIZE_BUILTIN_SYMBOLS(name) , m_##name##Symbol(JSC::Identifier::fromUid(JSC::PrivateName(JSC::PrivateName::Description, ASCIILiteral("Symbol." #name)))), m_##name##SymbolPrivateIdentifier(JSC::Identifier::fromString(vm, #name "Symbol"))
+#define DECLARE_BUILTIN_SYMBOLS(name) const JSC::Identifier m_##name##Symbol; const JSC::Identifier m_##name##SymbolPrivateIdentifier;
+#define DECLARE_BUILTIN_SYMBOL_ACCESSOR(name) \
+    const JSC::Identifier& name##Symbol() const { return m_##name##Symbol; }
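+
+// Illustrative sketch (the entry name is a hypothetical example): for an entry macro(create),
+// DECLARE_BUILTIN_NAMES and DECLARE_BUILTIN_IDENTIFIER_ACCESSOR expand roughly to
+//
+//     const JSC::Identifier m_create;                   // public name "create"
+//     const JSC::Identifier m_createPrivateName;        // private name "PrivateSymbol.create"
+//     const JSC::Identifier& createPublicName() const { return m_create; }
+//     const JSC::Identifier& createPrivateName() const { return m_createPrivateName; }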
+
+class Identifier;
+class SourceCode;
+class UnlinkedFunctionExecutable;
+class VM;
+
+JS_EXPORT_PRIVATE UnlinkedFunctionExecutable* createBuiltinExecutable(VM&, const SourceCode&, const Identifier&, ConstructAbility);
+    
+} // namespace JSC
diff --git a/builtins/DatePrototype.js b/builtins/DatePrototype.js
new file mode 100644
index 0000000..234f185
--- /dev/null
+++ b/builtins/DatePrototype.js
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2015 Andy VanWagoner .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(INTL)
+
+function toLocaleString(/* locales, options */)
+{
+    "use strict";
+
+    function toDateTimeOptionsAnyAll(opts)
+    {
+        // ToDateTimeOptions(options, "any", "all")
+        // http://www.ecma-international.org/ecma-402/2.0/#sec-InitializeDateTimeFormat
+
+        var options;
+        if (opts === @undefined)
+            options = null;
+        else if (opts === null)
+            @throwTypeError("null is not an object");
+        else
+            options = @Object(opts);
+
+        // Check original instead of descendant to reduce lookups up the prototype chain.
+        var needsDefaults = !options || (
+            options.weekday === @undefined &&
+            options.year === @undefined &&
+            options.month === @undefined &&
+            options.day === @undefined &&
+            options.hour === @undefined &&
+            options.minute === @undefined &&
+            options.second === @undefined
+        );
+
+        // Only create descendant if it will have own properties.
+        if (needsDefaults) {
+            options = @Object.@create(options);
+            options.year = "numeric";
+            options.month = "numeric";
+            options.day = "numeric";
+            options.hour = "numeric";
+            options.minute = "numeric";
+            options.second = "numeric";
+        }
+
+        // 9. Return options.
+        return options;
+    }
+
+    // 13.3.1 Date.prototype.toLocaleString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://www.ecma-international.org/ecma-402/2.0/#sec-Date.prototype.toLocaleString
+
+    var value = @thisTimeValue.@call(this);
+    if (@isNaN(value))
+        return "Invalid Date";
+
+    var options = toDateTimeOptionsAnyAll(@argument(1));
+    var locales = @argument(0);
+
+    var dateFormat = new @DateTimeFormat(locales, options);
+    return dateFormat.format(value);
+}
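+
+// Rough usage sketch; exact output depends on the ICU data and time zone available at runtime:
+//
+//     var d = new Date(Date.UTC(2016, 0, 1));
+//     d.toLocaleString("en-US");                      // e.g. "1/1/2016, 12:00:00 AM" in a UTC zone
+//     d.toLocaleString("en-US", { year: "numeric" }); // only the requested component, e.g. "2016"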
+
+function toLocaleDateString(/* locales, options */)
+{
+    "use strict";
+
+    function toDateTimeOptionsDateDate(opts)
+    {
+        // ToDateTimeOptions(options, "date", "date")
+        // http://www.ecma-international.org/ecma-402/2.0/#sec-InitializeDateTimeFormat
+
+        var options;
+        if (opts === @undefined)
+            options = null;
+        else if (opts === null)
+            @throwTypeError("null is not an object");
+        else
+            options = @Object(opts);
+
+        // Check original instead of descendant to reduce lookups up the prototype chain.
+        var needsDefaults = !options || (
+            options.weekday === @undefined &&
+            options.year === @undefined &&
+            options.month === @undefined &&
+            options.day === @undefined
+        );
+
+        // Only create descendant if it will have own properties.
+        if (needsDefaults) {
+            options = @Object.@create(options);
+            options.year = "numeric";
+            options.month = "numeric";
+            options.day = "numeric";
+        }
+
+        return options;
+    }
+
+    // 13.3.2 Date.prototype.toLocaleDateString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://www.ecma-international.org/ecma-402/2.0/#sec-Date.prototype.toLocaleDateString
+
+    var value = @thisTimeValue.@call(this);
+    if (@isNaN(value))
+        return "Invalid Date";
+
+    var options = toDateTimeOptionsDateDate(@argument(1));
+    var locales = @argument(0);
+
+    var dateFormat = new @DateTimeFormat(locales, options);
+    return dateFormat.format(value);
+}
+
+function toLocaleTimeString(/* locales, options */)
+{
+    "use strict";
+
+    function toDateTimeOptionsTimeTime(opts)
+    {
+        // ToDateTimeOptions(options, "time", "time")
+        // http://www.ecma-international.org/ecma-402/2.0/#sec-InitializeDateTimeFormat
+
+        var options;
+        if (opts === @undefined)
+            options = null;
+        else if (opts === null)
+            @throwTypeError("null is not an object");
+        else
+            options = @Object(opts);
+
+        // Check original instead of descendant to reduce lookups up the prototype chain.
+        var needsDefaults = !options || (
+            options.hour === @undefined &&
+            options.minute === @undefined &&
+            options.second === @undefined
+        );
+
+        // Only create descendant if it will have own properties.
+        if (needsDefaults) {
+            options = @Object.@create(options);
+            options.hour = "numeric";
+            options.minute = "numeric";
+            options.second = "numeric";
+        }
+
+        return options;
+    }
+
+    // 13.3.3 Date.prototype.toLocaleTimeString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://www.ecma-international.org/ecma-402/2.0/#sec-Date.prototype.toLocaleTimeString
+
+    var value = @thisTimeValue.@call(this);
+    if (@isNaN(value))
+        return "Invalid Date";
+
+    var options = toDateTimeOptionsTimeTime(@argument(1));
+    var locales = @argument(0);
+
+    var dateFormat = new @DateTimeFormat(locales, options);
+    return dateFormat.format(value);
+}
diff --git a/builtins/FunctionPrototype.js b/builtins/FunctionPrototype.js
new file mode 100644
index 0000000..f1ee867
--- /dev/null
+++ b/builtins/FunctionPrototype.js
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function call(thisArgument)
+{
+    "use strict";
+
+    let argumentValues = [];
+    // Start from 1 to ignore thisArgument
+    for (let i = 1; i < arguments.length; i++)
+        @putByValDirect(argumentValues, i-1, arguments[i]);
+
+    return this.@apply(thisArgument, argumentValues);
+}
+
+function apply(thisValue, argumentValues)
+{
+    "use strict";
+
+    return this.@apply(thisValue, argumentValues);
+}
+
+// FIXME: this should have a different name: https://bugs.webkit.org/show_bug.cgi?id=151363
+function symbolHasInstance(value)
+{
+    "use strict";
+
+    if (typeof this !== "function")
+        return false;
+
+    if (@isBoundFunction(this))
+        return @hasInstanceBoundFunction(this, value);
+
+    let target = this.prototype;
+    return @instanceOf(value, target);
+}
+
+function bind(thisValue)
+{
+    "use strict";
+
+    let target = this;
+    if (typeof target !== "function")
+        @throwTypeError("|this| is not a function inside Function.prototype.bind");
+
+    let argumentCount = arguments.length;
+    let boundArgs = null;
+    let numBoundArgs = 0;
+    if (argumentCount > 1) {
+        numBoundArgs = argumentCount - 1;
+        boundArgs = @newArrayWithSize(numBoundArgs);
+        for (let i = 0; i < numBoundArgs; i++)
+            @putByValDirect(boundArgs, i, arguments[i + 1]);
+    }
+
+    let length = 0;
+    if (@hasOwnLengthProperty(target)) {
+        let lengthValue = target.length;
+        if (typeof lengthValue === "number") {
+            lengthValue = lengthValue | 0;
+            // Note that we only care about positive lengthValues; however, this comparison
+            // against numBoundArgs suffices to prove we're not dealing with a negative number.
+            if (lengthValue > numBoundArgs)
+                length = lengthValue - numBoundArgs;
+        }
+    }
+
+    let name = target.name;
+    if (typeof name !== "string")
+        name = "";
+
+    return @makeBoundFunction(target, arguments[0], boundArgs, length, name);
+}
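+
+// Rough behavior sketch for the implementation above:
+//
+//     function add(a, b, c) { return a + b + c; }
+//     var bound = add.bind(null, 1, 2);
+//     bound(3);        // 6: the bound args [1, 2] are prepended to the call args
+//     bound.length;    // 1: target length (3) minus the number of bound args (2)
+//     bound.name;      // "bound add": the "bound " prefix is expected to come from @makeBoundFunction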
diff --git a/builtins/GeneratorPrototype.js b/builtins/GeneratorPrototype.js
new file mode 100644
index 0000000..4128a35
--- /dev/null
+++ b/builtins/GeneratorPrototype.js
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015-2016 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// 25.3.3.3 GeneratorResume ( generator, value )
+// 25.3.3.4 GeneratorResumeAbrupt(generator, abruptCompletion)
+@globalPrivate
+function generatorResume(generator, sentValue, resumeMode)
+{
+    "use strict";
+
+    let state = generator.@generatorState;
+    let done = false;
+    let value = @undefined;
+
+    if (typeof state !== 'number')
+        @throwTypeError("|this| should be a generator");
+
+    if (state === @GeneratorStateExecuting)
+        @throwTypeError("Generator is executing");
+
+    if (state === @GeneratorStateCompleted) {
+        if (resumeMode === @GeneratorResumeModeThrow)
+            throw sentValue;
+
+        done = true;
+        if (resumeMode === @GeneratorResumeModeReturn)
+            value = sentValue;
+    } else {
+        try {
+            generator.@generatorState = @GeneratorStateExecuting;
+            value = generator.@generatorNext.@call(generator.@generatorThis, generator, state, sentValue, resumeMode, generator.@generatorFrame);
+            if (generator.@generatorState === @GeneratorStateExecuting) {
+                generator.@generatorState = @GeneratorStateCompleted;
+                done = true;
+            }
+        } catch (error) {
+            generator.@generatorState = @GeneratorStateCompleted;
+            throw error;
+        }
+    }
+    return { done, value };
+}
+
+function next(value)
+{
+    "use strict";
+
+    return @generatorResume(this, value, @GeneratorResumeModeNormal);
+}
+
+function return(value)
+{
+    "use strict";
+
+    return @generatorResume(this, value, @GeneratorResumeModeReturn);
+}
+
+function throw(exception)
+{
+    "use strict";
+
+    return @generatorResume(this, exception, @GeneratorResumeModeThrow);
+}
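+
+// Rough usage sketch of the three entry points above:
+//
+//     function* g() { yield 1; yield 2; }
+//     var it = g();
+//     it.next();       // { done: false, value: 1 }         (GeneratorResumeModeNormal)
+//     it.return(42);   // { done: true, value: 42 }         (GeneratorResumeModeReturn)
+//     it.next();       // { done: true, value: undefined }  (the generator is already completed)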
diff --git a/builtins/GlobalObject.js b/builtins/GlobalObject.js
new file mode 100644
index 0000000..804930c
--- /dev/null
+++ b/builtins/GlobalObject.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2016 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@globalPrivate
+function isFinite(value)
+{
+    "use strict";
+
+    var numberValue = @toNumber(value);
+    // Return false if numberValue is |NaN|.
+    if (numberValue !== numberValue)
+        return false;
+    return numberValue !== @Infinity && numberValue !== -@Infinity;
+}
+
+@globalPrivate
+function isNaN(value)
+{
+    "use strict";
+
+    var numberValue = @toNumber(value);
+    return numberValue !== numberValue;
+}
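+
+// Quick sketch: these private helpers mirror the global isFinite / isNaN but avoid any
+// user-observable lookups, e.g. @isNaN(0 / 0) is true, and @isFinite("10") is true after @toNumber.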
diff --git a/builtins/GlobalOperations.js b/builtins/GlobalOperations.js
new file mode 100644
index 0000000..22220cf
--- /dev/null
+++ b/builtins/GlobalOperations.js
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+
+@globalPrivate
+function toInteger(target)
+{
+    "use strict";
+
+    var numberValue = @Number(target);
+
+    // isNaN(numberValue)
+    if (numberValue !== numberValue)
+        return 0;
+    return @trunc(numberValue);
+}
+
+@globalPrivate
+function toLength(target)
+{
+    "use strict";
+
+    var length = @toInteger(target);
+    // originally Math.min(Math.max(length, 0), maxSafeInteger)
+    return length > 0 ? (length < @MAX_SAFE_INTEGER ? length : @MAX_SAFE_INTEGER) : 0;
+}
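+
+// Quick sketch of the clamping behavior above:
+//
+//     @toLength(-5)          // 0
+//     @toLength(3.7)         // 3
+//     @toLength(@Infinity)   // @MAX_SAFE_INTEGER (2^53 - 1)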
+
+@globalPrivate
+function isDictionary(object)
+{
+    "use strict";
+
+    return object == null || typeof object === "object";
+}
+
+// FIXME: this needs to have its name changed to "get [Symbol.species]".
+// see: https://bugs.webkit.org/show_bug.cgi?id=151363
+@globalPrivate
+function speciesGetter()
+{
+    return this;
+}
+
+@globalPrivate
+function speciesConstructor(obj, defaultConstructor)
+{
+    var constructor = obj.constructor;
+    if (constructor === @undefined)
+        return defaultConstructor;
+    if (!@isObject(constructor))
+        @throwTypeError("|this|.constructor is not an Object or undefined");
+    constructor = constructor.@speciesSymbol;
+    if (constructor == null)
+        return defaultConstructor;
+    if (@isConstructor(constructor))
+        return constructor;
+    @throwTypeError("|this|.constructor[Symbol.species] is not a constructor");
+}
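+
+// Rough sketch of how @speciesConstructor is typically used by other builtins
+// (the surrounding code here is a hypothetical caller, not part of this file):
+//
+//     var constructor = @speciesConstructor(thisObject, @Array);
+//     var result = new constructor(length);  // falls back to @Array when constructor or @@species is undefined/null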
diff --git a/builtins/InspectorInstrumentationObject.js b/builtins/InspectorInstrumentationObject.js
new file mode 100644
index 0000000..fb7d9ea
--- /dev/null
+++ b/builtins/InspectorInstrumentationObject.js
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function promiseFulfilled(promise, value, reactions)
+{
+    "use strict";
+
+    if (!this.isEnabled)
+        return;
+}
+
+function promiseRejected(promise, reason, reactions)
+{
+    "use strict";
+
+    if (!this.isEnabled)
+        return;
+}
diff --git a/builtins/InternalPromiseConstructor.js b/builtins/InternalPromiseConstructor.js
new file mode 100644
index 0000000..d01f5f7
--- /dev/null
+++ b/builtins/InternalPromiseConstructor.js
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function internalAll(array)
+{
+    // This function is intended to be used in the JSC internals.
+    // The implementation should take care not to perform any user-observable
+    // or trappable operations.
+    //
+    // 1. Don't use for-of and iterables. This function only accepts
+    //    a dense array of promises.
+    // 2. Don't look up this.constructor / @@species. Always construct
+    //    a plain Promise object.
+
+    "use strict";
+
+    var promiseCapability = @newPromiseCapability(@InternalPromise);
+
+    var values = [];
+    var index = 0;
+    var remainingElementsCount = 0;
+
+    function newResolveElement(index)
+    {
+        var alreadyCalled = false;
+        return function (argument)
+        {
+            if (alreadyCalled)
+                return @undefined;
+            alreadyCalled = true;
+
+            @putByValDirect(values, index, argument);
+
+            --remainingElementsCount;
+            if (remainingElementsCount === 0)
+                return promiseCapability.@resolve.@call(@undefined, values);
+
+            return @undefined;
+        }
+    }
+
+    try {
+        if (array.length === 0)
+            promiseCapability.@resolve.@call(@undefined, values);
+        else {
+            for (var index = 0, length = array.length; index < length; ++index) {
+                var value = array[index];
+                @putByValDirect(values, index, @undefined);
+
+                var nextPromiseCapability = @newPromiseCapability(@InternalPromise);
+                nextPromiseCapability.@resolve.@call(@undefined, value);
+                var nextPromise = nextPromiseCapability.@promise;
+
+                var resolveElement = newResolveElement(index);
+                ++remainingElementsCount;
+                nextPromise.then(resolveElement, promiseCapability.@reject);
+            }
+        }
+    } catch (error) {
+        promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@promise;
+}
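+
+// Rough usage sketch (internal callers only): given a dense array of promises, internalAll
+// resolves with the array of their values, without consulting this.constructor or @@species:
+//
+//     @InternalPromise.internalAll([p0, p1]).then((values) => { /* values is [v0, v1] */ });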
diff --git a/builtins/IteratorHelpers.js b/builtins/IteratorHelpers.js
new file mode 100644
index 0000000..f565d44
--- /dev/null
+++ b/builtins/IteratorHelpers.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function performIteration(iterable)
+{
+    "use strict";
+    // This is performing a spread operation on the iterable passed in,
+    // and returning the result in an array.
+    // https://tc39.github.io/ecma262/#sec-runtime-semantics-arrayaccumulation
+
+    let result = [];
+
+    let iterator = iterable.@iteratorSymbol();
+    let item;
+    let index = 0;
+    while (true) {
+        item = iterator.next();
+        if (!@isObject(item))
+            @throwTypeError("Iterator result interface is not an object");
+        if (item.done)
+            return result;
+        @putByValDirect(result, index++, item.value);
+    }
+}
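+
+// Rough sketch of the intended equivalence (user-level spread syntax shown for illustration):
+//
+//     [...iterable]                  // spread syntax
+//     @performIteration(iterable)    // produces the same dense array via Symbol.iterator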
diff --git a/builtins/IteratorPrototype.js b/builtins/IteratorPrototype.js
new file mode 100644
index 0000000..5c1691a
--- /dev/null
+++ b/builtins/IteratorPrototype.js
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function symbolIteratorGetter()
+{
+    "use strict";
+
+    return this;
+}
diff --git a/builtins/MapPrototype.js b/builtins/MapPrototype.js
new file mode 100644
index 0000000..8302602
--- /dev/null
+++ b/builtins/MapPrototype.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (!@isMap(this))
+        @throwTypeError("Map operation called on non-Map object");
+
+    if (typeof callback !== 'function')
+        @throwTypeError("Map.prototype.forEach callback must be a function");
+
+    var thisArg = @argument(1);
+    var iterator = @MapIterator(this);
+
+    // To avoid allocating iterator result objects, we pass a placeholder array to the special "next" function, which fills in the results.
+    var value = [ @undefined, @undefined ];
+    for (;;) {
+        if (@mapIteratorNext.@call(iterator, value))
+            break;
+        callback.@call(thisArg, value[1], value[0], this);
+    }
+}
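+
+// Rough usage sketch: the placeholder array above is reused across iterations, so each
+// callback invocation observes (value, key, map) without allocating a fresh iterator result:
+//
+//     var map = new Map([["a", 1], ["b", 2]]);
+//     map.forEach((value, key) => { /* ("a", 1), then ("b", 2) */ });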
diff --git a/builtins/ModuleLoaderPrototype.js b/builtins/ModuleLoaderPrototype.js
new file mode 100644
index 0000000..01c2b21
--- /dev/null
+++ b/builtins/ModuleLoaderPrototype.js
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// https://whatwg.github.io/loader/#loader-object
+// Module Loader has several hooks that can be customized by the platform.
+// For example, the JavaScriptCore shell can provide the [[Fetch]] hook
+// to fetch the payload from the local file system.
+// Currently, there are 3 hooks.
+//    1. Loader.resolve
+//    2. Loader.fetch
+//    3. Loader.instantiate
+
+@globalPrivate
+function setStateToMax(entry, newState)
+{
+    // https://whatwg.github.io/loader/#set-state-to-max
+
+    "use strict";
+
+    if (entry.state < newState)
+        entry.state = newState;
+}
+
+@globalPrivate
+function newRegistryEntry(key)
+{
+    // https://whatwg.github.io/loader/#registry
+    //
+    // Each registry entry is in one of the following 5 states.
+    // 1. Fetch
+    //     Ready to fetch (or now fetching) the resource of this module.
+    //     Typically, we fetch the source code over the network or from the file system.
+    //     a. If the status is Fetch and there is no entry.fetch promise, the entry is ready to fetch.
+    //     b. If the status is Fetch and there is an entry.fetch promise, the entry is currently fetching the resource.
+    //
+    // 2. Instantiate (AnalyzeModule)
+    //     Ready to instantiate (or now instantiating) the module record from the fetched
+    //     source code.
+    //     Typically, we parse the module code, extract the dependencies and binding information.
+    //     a. If the status is Instantiate and there is no entry.instantiate promise, the entry is ready to instantiate.
+    //     b. If the status is Instantiate and there is an entry.instantiate promise, the entry is currently instantiating
+    //        the module record.
+    //
+    // 3. Satisfy
+    //     Ready to request the dependent modules (or now requesting & resolving).
+    //     Without this state, the current draft causes infinite recursion when there is a circular dependency.
+    //     a. If the status is Satisfy and there is no entry.satisfy promise, the entry is ready to resolve the dependencies.
+    //     b. If the status is Satisfy and there is an entry.satisfy promise, the entry is currently resolving
+    //        the dependencies.
+    //
+    // 4. Link
+    //     Ready to link the module with the other modules.
+    //     Linking means that the module imports and exports the bindings from/to the other modules.
+    //
+    // 5. Ready
+    //     The module is linked, so the module is ready to be executed.
+    //
+    // Each registry entry has 3 promises: "fetch", "instantiate", and "satisfy".
+    // They are assigned when the corresponding phase starts, and they are fulfilled when that phase completes.
+    //
+    // In the current module draft, linking is performed after all the modules are instantiated and their dependencies are resolved.
+    // Execution is likewise done after all the modules are linked.
+    //
+    // TODO: We need a way to execute a module while unrelated modules are still being fetched.
+    // One solution: introduce a ready promise chain to execute the modules concurrently while keeping
+    // the execution order.
+
+    "use strict";
+
+    return {
+        key: key,
+        state: @ModuleFetch,
+        metadata: @undefined,
+        fetch: @undefined,
+        instantiate: @undefined,
+        satisfy: @undefined,
+        dependencies: [], // To keep the module order, we store the module keys in the array.
+        dependenciesMap: @undefined,
+        module: @undefined, // JSModuleRecord
+        error: @undefined,
+    };
+}
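+
+// Rough sketch of the state progression for a single module moving through the pipeline
+// below (assuming no errors):
+//
+//     Fetch --(requestFetch)--> Instantiate --(requestInstantiate)--> Satisfy
+//           --(requestSatisfy: all dependencies satisfied)--> Link --(link)--> Ready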
+
+function ensureRegistered(key)
+{
+    // https://whatwg.github.io/loader/#ensure-registered
+
+    "use strict";
+
+    var entry = this.registry.@get(key);
+    if (entry)
+        return entry;
+
+    entry = @newRegistryEntry(key);
+    this.registry.@set(key, entry);
+
+    return entry;
+}
+
+function forceFulfillPromise(promise, value)
+{
+    "use strict";
+
+    if (promise.@promiseState === @promiseStatePending)
+        @fulfillPromise(promise, value);
+}
+
+function fulfillFetch(entry, payload)
+{
+    // https://whatwg.github.io/loader/#fulfill-fetch
+
+    "use strict";
+
+    if (!entry.fetch)
+        entry.fetch = @newPromiseCapability(@InternalPromise).@promise;
+    this.forceFulfillPromise(entry.fetch, payload);
+    @setStateToMax(entry, @ModuleInstantiate);
+}
+
+function fulfillInstantiate(entry, optionalInstance, source)
+{
+    // https://whatwg.github.io/loader/#fulfill-instantiate
+
+    "use strict";
+
+    if (!entry.instantiate)
+        entry.instantiate = @newPromiseCapability(@InternalPromise).@promise;
+    this.commitInstantiated(entry, optionalInstance, source);
+
+    // FIXME: The draft fulfills the promise in the CommitInstantiated operation.
+    // But CommitInstantiated is also used in requestInstantiate, and
+    // we should not "force fulfill" there.
+    // So we separate the "force fulfill" operation from the CommitInstantiated operation.
+    // https://github.com/whatwg/loader/pull/67
+    this.forceFulfillPromise(entry.instantiate, entry);
+}
+
+function commitInstantiated(entry, optionalInstance, source)
+{
+    // https://whatwg.github.io/loader/#commit-instantiated
+
+    "use strict";
+
+    var moduleRecord = this.instantiation(optionalInstance, source, entry);
+
+    // FIXME: The draft describes this step as
+    //   4. Fulfill entry.[[Instantiate]] with instance.
+    // But the instantiate promise should be fulfilled with the entry.
+    // We omit this statement because the instantiate promise will be
+    // fulfilled without this "force fulfill" operation.
+    // https://github.com/whatwg/loader/pull/67
+
+    var dependencies = [];
+    var dependenciesMap = moduleRecord.dependenciesMap;
+    moduleRecord.registryEntry = entry;
+    var requestedModules = this.requestedModules(moduleRecord);
+    for (var i = 0, length = requestedModules.length; i < length; ++i) {
+        var depKey = requestedModules[i];
+        var pair = {
+            key: depKey,
+            value: @undefined
+        };
+        @putByValDirect(dependencies, dependencies.length, pair);
+        dependenciesMap.@set(depKey, pair);
+    }
+    entry.dependencies = dependencies;
+    entry.dependenciesMap = dependenciesMap;
+    entry.module = moduleRecord;
+    @setStateToMax(entry, @ModuleSatisfy);
+}
+
+function instantiation(result, source, entry)
+{
+    // https://whatwg.github.io/loader/#instantiation
+    // FIXME: Current implementation does not support optionalInstance.
+    // https://bugs.webkit.org/show_bug.cgi?id=148171
+
+    "use strict";
+
+    return this.parseModule(entry.key, source);
+}
+
+// Loader.
+
+function requestFetch(key, initiator)
+{
+    // https://whatwg.github.io/loader/#request-fetch
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.fetch)
+        return entry.fetch;
+
+    // Hook point.
+    // 2. Loader.fetch
+    //     https://whatwg.github.io/loader/#browser-fetch
+    //     Take the key and fetch the resource actually.
+    //     For example, JavaScriptCore shell can provide the hook fetching the resource
+    //     from the local file system.
+    var fetchPromise = this.fetch(key, initiator).then((payload) => {
+        @setStateToMax(entry, @ModuleInstantiate);
+        return payload;
+    });
+    entry.fetch = fetchPromise;
+    return fetchPromise;
+}
+
+function requestInstantiate(key, initiator)
+{
+    // https://whatwg.github.io/loader/#request-instantiate
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.instantiate)
+        return entry.instantiate;
+
+    var instantiatePromise = this.requestFetch(key, initiator).then((source) => {
+        // Hook point.
+        // 3. Loader.instantiate
+        //     https://whatwg.github.io/loader/#browser-instantiate
+        //     Take the key and the fetched source code, and instantiate the module record
+        //     by parsing the module source code.
+        //     It has the chance to provide the optional module instance that is different from
+        //     the ordinary one.
+        return this.instantiate(key, source, initiator).then((optionalInstance) => {
+            this.commitInstantiated(entry, optionalInstance, source);
+            return entry;
+        });
+    });
+    entry.instantiate = instantiatePromise;
+    return instantiatePromise;
+}
+
+function requestSatisfy(key, initiator)
+{
+    // https://whatwg.github.io/loader/#satisfy-instance
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.satisfy)
+        return entry.satisfy;
+
+    var satisfyPromise = this.requestInstantiate(key, initiator).then((entry) => {
+        var depLoads = [];
+        for (var i = 0, length = entry.dependencies.length; i < length; ++i) {
+            let pair = entry.dependencies[i];
+
+            // Hook point.
+            // 1. Loader.resolve.
+            //     https://whatwg.github.io/loader/#browser-resolve
+            //     Take the name and resolve it to the unique identifier for the resource location.
+            //     For example, take the "jquery" and return the URL for the resource.
+            var promise = this.resolve(pair.key, key, initiator).then((depKey) => {
+                var depEntry = this.ensureRegistered(depKey);
+
+                // Recursive resolution. The dependencies of this entry are being resolved or are already resolved,
+                // so stop tracing the circular dependencies here.
+                // But to retrieve the instantiated module record correctly,
+                // we still need to wait for the instantiation of the dependent module.
+                // For example, when we reach here, the dependent module has started resolving its dependencies,
+                // but it may or may not have reached the instantiation phase in the loader's pipeline.
+                // If we waited for this module's Satisfy, we would construct a circular promise chain that would be
+                // rejected by the Promises runtime. Since all we need is the instantiated module, instead of waiting
+                // for this module's Satisfy, we just wait for its Instantiate.
+                if (depEntry.satisfy) {
+                    return depEntry.instantiate.then((entry) => {
+                        pair.value = entry.module;
+                        return entry;
+                    });
+                }
+
+                return this.requestSatisfy(depKey, initiator).then((entry) => {
+                    pair.value = entry.module;
+                    return entry;
+                });
+            });
+            @putByValDirect(depLoads, depLoads.length, promise);
+        }
+
+        return @InternalPromise.internalAll(depLoads).then((modules) => {
+            @setStateToMax(entry, @ModuleLink);
+            return entry;
+        });
+    });
+
+    entry.satisfy = satisfyPromise;
+    return satisfyPromise;
+}
+
+function requestInstantiateAll(key, initiator)
+{
+    // https://whatwg.github.io/loader/#request-instantiate-all
+
+    "use strict";
+
+    return this.requestSatisfy(key, initiator);
+}
+
+function requestLink(key, initiator)
+{
+    // https://whatwg.github.io/loader/#request-link
+
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.state > @ModuleLink) {
+        var deferred = @newPromiseCapability(@InternalPromise);
+        deferred.@resolve.@call(@undefined, entry);
+        return deferred.@promise;
+    }
+
+    return this.requestInstantiateAll(key, initiator).then((entry) => {
+        this.link(entry, initiator);
+        return entry;
+    });
+}
+
+function requestReady(key, initiator)
+{
+    // https://whatwg.github.io/loader/#request-ready
+
+    "use strict";
+
+    return this.requestLink(key, initiator).then((entry) => {
+        this.moduleEvaluation(entry.module, initiator);
+    });
+}
+
+// Linking semantics.
+
+function link(entry, initiator)
+{
+    // https://whatwg.github.io/loader/#link
+
+    "use strict";
+
+    // FIXME: Current implementation does not support optionalInstance.
+    // So Link's step 3 is skipped.
+    // https://bugs.webkit.org/show_bug.cgi?id=148171
+
+    if (entry.state === @ModuleReady)
+        return;
+    @setStateToMax(entry, @ModuleReady);
+
+    // Since we already have the "dependencies" field,
+    // we can call moduleDeclarationInstantiation in the correct order
+    // without constructing the dependency graph via dependencyGraph.
+    var dependencies = entry.dependencies;
+    for (var i = 0, length = dependencies.length; i < length; ++i) {
+        var pair = dependencies[i];
+        this.link(pair.value.registryEntry, initiator);
+    }
+
+    this.moduleDeclarationInstantiation(entry.module, initiator);
+}
+
+// Module semantics.
+
+function moduleEvaluation(moduleRecord, initiator)
+{
+    // http://www.ecma-international.org/ecma-262/6.0/#sec-moduleevaluation
+
+    "use strict";
+
+    if (moduleRecord.evaluated)
+        return;
+    moduleRecord.evaluated = true;
+
+    var entry = moduleRecord.registryEntry;
+
+    // The contents of [[RequestedModules]] are cloned into entry.dependencies.
+    var dependencies = entry.dependencies;
+    for (var i = 0, length = dependencies.length; i < length; ++i) {
+        var pair = dependencies[i];
+        var requiredModuleRecord = pair.value;
+        this.moduleEvaluation(requiredModuleRecord, initiator);
+    }
+    this.evaluate(entry.key, moduleRecord, initiator);
+}
+
+// APIs to control the module loader.
+
+function provide(key, stage, value)
+{
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+
+    if (stage === @ModuleFetch) {
+        if (entry.state > @ModuleFetch)
+            @throwTypeError("Requested module is already fetched.");
+        this.fulfillFetch(entry, value);
+        return;
+    }
+
+    if (stage === @ModuleInstantiate) {
+        if (entry.state > @ModuleInstantiate)
+            @throwTypeError("Requested module is already instantiated.");
+        this.fulfillFetch(entry, @undefined);
+        entry.fetch.then((source) => {
+            this.fulfillInstantiate(entry, value, source);
+        });
+        return;
+    }
+
+    @throwTypeError("Requested module is already ready to be executed.");
+}
+
+function loadAndEvaluateModule(moduleName, referrer, initiator)
+{
+    "use strict";
+
+    // Loader.resolve hook point.
+    // resolve: moduleName => Promise(moduleKey)
+    // Take the name and resolve it to the unique identifier for the resource location.
+    // For example, take "jquery" and return the URL of the resource.
+    return this.resolve(moduleName, referrer, initiator).then((key) => {
+        return this.requestReady(key, initiator);
+    });
+}
+
+function loadModule(moduleName, referrer, initiator)
+{
+    "use strict";
+
+    // Loader.resolve hook point.
+    // resolve: moduleName => Promise(moduleKey)
+    // Take the name and resolve it to the unique identifier for the resource location.
+    // For example, take "jquery" and return the URL of the resource.
+    return this.resolve(moduleName, referrer, initiator).then((key) => {
+        return this.requestInstantiateAll(key, initiator);
+    }).then((entry) => {
+        return entry.key;
+    });
+}
+
+function linkAndEvaluateModule(key, initiator)
+{
+    "use strict";
+
+    var entry = this.ensureRegistered(key);
+    if (entry.state < @ModuleLink)
+        @throwTypeError("Requested module is not instantiated yet.");
+
+    this.link(entry, initiator);
+    return this.moduleEvaluation(entry.module, initiator);
+}
diff --git a/builtins/NumberConstructor.js b/builtins/NumberConstructor.js
new file mode 100644
index 0000000..2c0e4c8
--- /dev/null
+++ b/builtins/NumberConstructor.js
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function isFinite(value)
+{
+    "use strict";
+
+    if (typeof value !== "number")
+        return false;
+
+    // Return false if value is |NaN|.
+    if (value !== value)
+        return false;
+
+    return value !== @Infinity && value !== -@Infinity;
+}
+
+function isNaN(value)
+{
+    "use strict";
+
+    return value !== value;
+}
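+
+// Usage sketch (assuming a standard global environment): unlike the global
+// isFinite/isNaN, these builtins never coerce their argument, so non-number
+// inputs simply yield false.
+//
+//     Number.isFinite("42");   // false -- no string-to-number coercion
+//     Number.isFinite(1 / 0);  // false -- Infinity fails the final comparison
+//     Number.isNaN("NaN");     // false -- only the number NaN is unequal to itself
+//     Number.isNaN(0 / 0);     // true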
diff --git a/builtins/NumberPrototype.js b/builtins/NumberPrototype.js
new file mode 100644
index 0000000..435ea78
--- /dev/null
+++ b/builtins/NumberPrototype.js
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 Andy VanWagoner .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @conditional=ENABLE(INTL)
+
+function toLocaleString(/* locales, options */)
+{
+    "use strict";
+
+    // 13.2.1 Number.prototype.toLocaleString ([locales [, options ]]) (ECMA-402 2.0)
+    // http://ecma-international.org/publications/standards/Ecma-402.htm
+
+    // 1. Let x be thisNumberValue(this value).
+    // 2. ReturnIfAbrupt(x).
+    var number = @thisNumberValue.@call(this);
+
+    // 3. Let numberFormat be Construct(%NumberFormat%, «locales, options»).
+    // 4. ReturnIfAbrupt(numberFormat).
+    var numberFormat = new @NumberFormat(@argument(0), @argument(1));
+
+    // 5. Return FormatNumber(numberFormat, x).
+    return numberFormat.format(number);
+}
diff --git a/builtins/ObjectConstructor.js b/builtins/ObjectConstructor.js
new file mode 100644
index 0000000..d855beb
--- /dev/null
+++ b/builtins/ObjectConstructor.js
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 Oleksandr Skachkov .
+ * Copyright (C) 2015 Jordan Harband. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+@globalPrivate
+function enumerableOwnProperties(object, kind)
+{
+    "use strict";
+
+    const obj = @Object(object);
+    const ownKeys = @Reflect.@ownKeys(obj);
+    const properties = [];
+    for (let i = 0, keysLength = ownKeys.length; i < keysLength; ++i) {
+        let nextKey = ownKeys[i];
+        if (typeof nextKey === 'string') {
+            let descriptor = @Reflect.@getOwnPropertyDescriptor(obj, nextKey);
+            if (descriptor !== @undefined && descriptor.enumerable) {
+                if (kind === @iterationKindValue)
+                    properties.@push(obj[nextKey]);
+                else if (kind === @iterationKindKeyValue)
+                    properties.@push([nextKey, obj[nextKey]]);
+            }
+        }
+    }
+    
+    return properties;
+}
+
+function values(object)
+{
+    "use strict";
+    
+    if (object == null)
+        @throwTypeError("Object.values requires that input parameter not be null or undefined");
+
+    return @enumerableOwnProperties(object, @iterationKindValue);
+}
+
+function entries(object)
+{
+    "use strict";
+    
+    if (object == null)
+        @throwTypeError("Object.entries requires that input parameter not be null or undefined");
+    
+    return @enumerableOwnProperties(object, @iterationKindKeyValue);
+}
+
+function assign(target/*[*/, /*...*/sources/*] */)
+{
+    "use strict";
+
+    if (target == null)
+        @throwTypeError("Object.assign requires that input parameter not be null or undefined");
+
+    let objTarget = @Object(target);
+    for (let s = 1, argumentsLength = arguments.length; s < argumentsLength; ++s) {
+        let nextSource = arguments[s];
+        if (nextSource != null) {
+            let from = @Object(nextSource);
+            let keys = @Reflect.@ownKeys(from);
+            for (let i = 0, keysLength = keys.length; i < keysLength; ++i) {
+                let nextKey = keys[i];
+                let descriptor = @Reflect.@getOwnPropertyDescriptor(from, nextKey);
+                if (descriptor !== @undefined && descriptor.enumerable)
+                    objTarget[nextKey] = from[nextKey];
+            }
+        }
+    }
+    return objTarget;
+}
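+
+// Usage sketch (assuming a standard global environment): values and entries
+// visit only enumerable own string-keyed properties; assign copies every
+// enumerable own property onto the target, left to right, skipping null and
+// undefined sources.
+//
+//     Object.values({ a: 1, b: 2 });           // [1, 2]
+//     Object.entries({ a: 1, b: 2 });          // [["a", 1], ["b", 2]]
+//     Object.assign({ a: 1 }, null, { b: 2 }); // { a: 1, b: 2 }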
diff --git a/builtins/PromiseConstructor.js b/builtins/PromiseConstructor.js
new file mode 100644
index 0000000..3f0848d
--- /dev/null
+++ b/builtins/PromiseConstructor.js
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function all(iterable)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not an object");
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    var values = [];
+    var index = 0;
+    var remainingElementsCount = 1;
+
+    function newResolveElement(index)
+    {
+        var alreadyCalled = false;
+        return function (argument)
+        {
+            if (alreadyCalled)
+                return @undefined;
+            alreadyCalled = true;
+
+            @putByValDirect(values, index, argument);
+
+            --remainingElementsCount;
+            if (remainingElementsCount === 0)
+                return promiseCapability.@resolve.@call(@undefined, values);
+
+            return @undefined;
+        }
+    }
+
+    try {
+        for (var value of iterable) {
+            @putByValDirect(values, index, @undefined);
+            var nextPromise = this.resolve(value);
+            var resolveElement = newResolveElement(index);
+            ++remainingElementsCount;
+            nextPromise.then(resolveElement, promiseCapability.@reject);
+            ++index;
+        }
+
+        --remainingElementsCount;
+        if (remainingElementsCount === 0)
+            promiseCapability.@resolve.@call(@undefined, values);
+    } catch (error) {
+        promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@promise;
+}
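+
+// Usage sketch (assuming a standard global environment): each value is passed
+// through this.resolve, results keep the input order regardless of settlement
+// order, and the capability resolves once the remaining-elements counter drops
+// back to zero.
+//
+//     Promise.all([Promise.resolve(1), 2, Promise.resolve(3)])
+//         .then((values) => values); // fulfills with [1, 2, 3]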
+
+function race(iterable)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not an object");
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    try {
+        for (var value of iterable) {
+            var nextPromise = this.resolve(value);
+            nextPromise.then(promiseCapability.@resolve, promiseCapability.@reject);
+        }
+    } catch (error) {
+        promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@promise;
+}
+
+function reject(reason)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not an object");
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    promiseCapability.@reject.@call(@undefined, reason);
+
+    return promiseCapability.@promise;
+}
+
+function resolve(value)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("|this| is not an object");
+
+    if (@isPromise(value)) {
+        var valueConstructor = value.constructor;
+        if (valueConstructor === this)
+            return value;
+    }
+
+    var promiseCapability = @newPromiseCapability(this);
+
+    promiseCapability.@resolve.@call(@undefined, value);
+
+    return promiseCapability.@promise;
+}
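+
+// Usage sketch (assuming a standard global environment): resolve returns its
+// argument unchanged when it is already a promise whose .constructor is the
+// receiver, while reject always allocates a fresh rejected promise.
+//
+//     const p = Promise.resolve(1);
+//     Promise.resolve(p) === p; // true
+//     Promise.reject(p) === p;  // false -- always a new promise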
diff --git a/builtins/PromiseOperations.js b/builtins/PromiseOperations.js
new file mode 100644
index 0000000..61564e7
--- /dev/null
+++ b/builtins/PromiseOperations.js
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// @internal
+
+@globalPrivate
+function isPromise(promise)
+{
+    "use strict";
+
+    return @isObject(promise) && !!promise.@promiseState;
+}
+
+@globalPrivate
+function newPromiseReaction(capability, onFulfilled, onRejected)
+{
+    "use strict";
+
+    return {
+        @capabilities: capability,
+        @onFulfilled: onFulfilled,
+        @onRejected: onRejected,
+    };
+}
+
+@globalPrivate
+function newPromiseCapability(constructor)
+{
+    "use strict";
+
+    if (!@isConstructor(constructor))
+        @throwTypeError("promise capability requires a constructor function");
+
+    var promiseCapability = {
+        @promise: @undefined,
+        @resolve: @undefined,
+        @reject: @undefined
+    };
+
+    function executor(resolve, reject)
+    {
+        if (promiseCapability.@resolve !== @undefined)
+            @throwTypeError("resolve function is already set");
+        if (promiseCapability.@reject !== @undefined)
+            @throwTypeError("reject function is already set");
+
+        promiseCapability.@resolve = resolve;
+        promiseCapability.@reject = reject;
+    }
+
+    var promise = new constructor(executor);
+
+    if (typeof promiseCapability.@resolve !== "function")
+        @throwTypeError("executor did not take a resolve function");
+
+    if (typeof promiseCapability.@reject !== "function")
+        @throwTypeError("executor did not take a reject function");
+
+    promiseCapability.@promise = promise;
+
+    return promiseCapability;
+}
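+
+// Sketch of the observable contract (assuming a standard global environment):
+// the capability is built by handing the constructor an executor that captures
+// resolve/reject, so any Promise subclass must invoke its executor exactly once
+// with both functions for the capability checks above to pass.
+//
+//     class MyPromise extends Promise {}
+//     MyPromise.resolve(1) instanceof MyPromise; // true -- built via this capability path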
+
+@globalPrivate
+function triggerPromiseReactions(state, reactions, argument)
+{
+    "use strict";
+
+    for (var index = 0, length = reactions.length; index < length; ++index)
+        @enqueueJob(@promiseReactionJob, [state, reactions[index], argument]);
+}
+
+@globalPrivate
+function rejectPromise(promise, reason)
+{
+    "use strict";
+
+    var reactions = promise.@promiseReactions;
+    promise.@promiseResult = reason;
+    promise.@promiseReactions = @undefined;
+    promise.@promiseState = @promiseStateRejected;
+
+    @InspectorInstrumentation.promiseRejected(promise, reason, reactions);
+
+    @triggerPromiseReactions(@promiseStateRejected, reactions, reason);
+}
+
+@globalPrivate
+function fulfillPromise(promise, value)
+{
+    "use strict";
+
+    var reactions = promise.@promiseReactions;
+    promise.@promiseResult = value;
+    promise.@promiseReactions = @undefined;
+    promise.@promiseState = @promiseStateFulfilled;
+
+    @InspectorInstrumentation.promiseFulfilled(promise, value, reactions);
+
+    @triggerPromiseReactions(@promiseStateFulfilled, reactions, value);
+}
+
+@globalPrivate
+function createResolvingFunctions(promise)
+{
+    "use strict";
+
+    var alreadyResolved = false;
+
+    var resolve = function (resolution) {
+        if (alreadyResolved)
+            return @undefined;
+        alreadyResolved = true;
+
+        if (resolution === promise)
+            return @rejectPromise(promise, new @TypeError("Resolve a promise with itself"));
+
+        if (!@isObject(resolution))
+            return @fulfillPromise(promise, resolution);
+
+        var then;
+        try {
+            then = resolution.then;
+        } catch (error) {
+            return @rejectPromise(promise, error);
+        }
+
+        if (typeof then !== 'function')
+            return @fulfillPromise(promise, resolution);
+
+        @enqueueJob(@promiseResolveThenableJob, [promise, resolution, then]);
+
+        return @undefined;
+    };
+
+    var reject = function (reason) {
+        if (alreadyResolved)
+            return @undefined;
+        alreadyResolved = true;
+
+        return @rejectPromise(promise, reason);
+    };
+
+    return {
+        @resolve: resolve,
+        @reject: reject
+    };
+}
+
+@globalPrivate
+function promiseReactionJob(state, reaction, argument)
+{
+    "use strict";
+
+    var promiseCapability = reaction.@capabilities;
+
+    var result;
+    var handler = (state === @promiseStateFulfilled) ? reaction.@onFulfilled : reaction.@onRejected;
+    try {
+        result = handler(argument);
+    } catch (error) {
+        return promiseCapability.@reject.@call(@undefined, error);
+    }
+
+    return promiseCapability.@resolve.@call(@undefined, result);
+}
+
+@globalPrivate
+function promiseResolveThenableJob(promiseToResolve, thenable, then)
+{
+    "use strict";
+
+    var resolvingFunctions = @createResolvingFunctions(promiseToResolve);
+
+    try {
+        return then.@call(thenable, resolvingFunctions.@resolve, resolvingFunctions.@reject);
+    } catch (error) {
+        return resolvingFunctions.@reject.@call(@undefined, error);
+    }
+}
+
+@globalPrivate
+function initializePromise(executor)
+{
+    "use strict";
+
+    if (typeof executor !== 'function')
+        @throwTypeError("Promise constructor takes a function argument");
+
+    this.@promiseState = @promiseStatePending;
+    this.@promiseReactions = [];
+
+    var resolvingFunctions = @createResolvingFunctions(this);
+    try {
+        executor(resolvingFunctions.@resolve, resolvingFunctions.@reject);
+    } catch (error) {
+        return resolvingFunctions.@reject.@call(@undefined, error);
+    }
+
+    return this;
+}
diff --git a/builtins/PromisePrototype.js b/builtins/PromisePrototype.js
new file mode 100644
index 0000000..6065ad8
--- /dev/null
+++ b/builtins/PromisePrototype.js
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function catch(onRejected)
+{
+    "use strict";
+
+    return this.then(@undefined, onRejected);
+}
+
+function then(onFulfilled, onRejected)
+{
+    "use strict";
+
+    if (!@isPromise(this))
+        @throwTypeError("|this| is not an object");
+
+    var constructor = @speciesConstructor(this, @Promise);
+
+    var resultCapability = @newPromiseCapability(constructor);
+
+    if (typeof onFulfilled !== "function")
+        onFulfilled = function (argument) { return argument; };
+
+    if (typeof onRejected !== "function")
+        onRejected = function (argument) { throw argument; };
+
+    var reaction = @newPromiseReaction(resultCapability, onFulfilled, onRejected);
+
+    var state = this.@promiseState;
+    if (state === @promiseStatePending)
+        @putByValDirect(this.@promiseReactions, this.@promiseReactions.length, reaction);
+    else
+        @enqueueJob(@promiseReactionJob, [state, reaction, this.@promiseResult]);
+
+    return resultCapability.@promise;
+}
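+
+// Usage sketch (assuming a standard global environment): a reaction against a
+// pending promise is queued on @promiseReactions, while a reaction against a
+// settled promise is enqueued as a job immediately, so handlers always run in
+// a later microtask.
+//
+//     Promise.resolve(1).then((v) => v + 1); // handler runs asynchronously; result fulfills with 2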
diff --git a/builtins/ReflectObject.js b/builtins/ReflectObject.js
new file mode 100644
index 0000000..1aaa1f4
--- /dev/null
+++ b/builtins/ReflectObject.js
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// https://tc39.github.io/ecma262/#sec-reflect.apply
+function apply(target, thisArgument, argumentsList)
+{
+    "use strict";
+
+    if (typeof target !== "function")
+        @throwTypeError("Reflect.apply requires the first argument be a function");
+
+    if (!@isObject(argumentsList))
+        @throwTypeError("Reflect.apply requires the third argument be an object");
+
+    return target.@apply(thisArgument, argumentsList);
+}
+
+// https://tc39.github.io/ecma262/#sec-reflect.deleteproperty
+function deleteProperty(target, propertyKey)
+{
+    // Intentionally keep this code in sloppy mode to suppress the TypeError
+    // raised by the delete operator in strict mode.
+
+    if (!@isObject(target))
+        @throwTypeError("Reflect.deleteProperty requires the first argument be an object");
+
+    return delete target[propertyKey];
+}
+
+// https://tc39.github.io/ecma262/#sec-reflect.has
+function has(target, propertyKey)
+{
+    "use strict";
+
+    if (!@isObject(target))
+        @throwTypeError("Reflect.has requires the first argument be an object");
+
+    return propertyKey in target;
+}
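+
+// Usage sketch (assuming a standard global environment):
+//
+//     Reflect.apply(Math.max, undefined, [1, 2, 3]); // 3
+//     Reflect.deleteProperty({ x: 1 }, "x");         // true, even from strict-mode callers
+//     Reflect.has({ x: 1 }, "toString");             // true -- walks the prototype chain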
diff --git a/builtins/RegExpPrototype.js b/builtins/RegExpPrototype.js
new file mode 100644
index 0000000..017a81c
--- /dev/null
+++ b/builtins/RegExpPrototype.js
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+@globalPrivate
+function advanceStringIndex(string, index, unicode)
+{
+    // This function implements AdvanceStringIndex described in ES6 21.2.5.2.3.
+    "use strict";
+
+    if (!unicode)
+        return index + 1;
+
+    if (index + 1 >= string.length)
+        return index + 1;
+
+    let first = string.@charCodeAt(index);
+    if (first < 0xD800 || first > 0xDBFF)
+        return index + 1;
+
+    let second = string.@charCodeAt(index + 1);
+    if (second < 0xDC00 || second > 0xDFFF)
+        return index + 1;
+
+    return index + 2;
+}
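+
+// Illustration of the algorithm (hypothetical direct calls; this helper is only
+// reachable from the builtins): with unicode set, the index skips over a whole
+// surrogate pair, otherwise it always advances by one code unit.
+//
+//     advanceStringIndex("a\u{1F600}b", 1, true);  // 3 -- the emoji occupies indices 1-2
+//     advanceStringIndex("a\u{1F600}b", 1, false); // 2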
+
+@globalPrivate
+function regExpExec(regexp, str)
+{
+    "use strict";
+
+    let exec = regexp.exec;
+    let builtinExec = @regExpBuiltinExec;
+    if (exec !== builtinExec && typeof exec === "function") {
+        let result = exec.@call(regexp, str);
+        if (result !== null && !@isObject(result))
+            @throwTypeError("The result of a RegExp exec must be null or an object");
+        return result;
+    }
+    return builtinExec.@call(regexp, str);
+}
+
+@globalPrivate
+function hasObservableSideEffectsForRegExpMatch(regexp) {
+    // This is accessed by the RegExpExec internal function.
+    let regexpExec = @tryGetById(regexp, "exec");
+    if (regexpExec !== @regExpBuiltinExec)
+        return true;
+
+    let regexpGlobal = @tryGetById(regexp, "global");
+    if (regexpGlobal !== @regExpProtoGlobalGetter)
+        return true;
+    let regexpUnicode = @tryGetById(regexp, "unicode");
+    if (regexpUnicode !== @regExpProtoUnicodeGetter)
+        return true;
+
+    return !@isRegExpObject(regexp);
+}
+
+function match(strArg)
+{
+    "use strict";
+
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@match requires that |this| be an Object");
+
+    let regexp = this;
+
+    // Check for observable side effects and call the fast path if there aren't any.
+    if (!@hasObservableSideEffectsForRegExpMatch(regexp))
+        return @regExpMatchFast.@call(regexp, strArg);
+
+    let str = @toString(strArg);
+
+    if (!regexp.global)
+        return @regExpExec(regexp, str);
+    
+    let unicode = regexp.unicode;
+    regexp.lastIndex = 0;
+    let resultList = [];
+
+    // FIXME: It would be great to implement a solution similar to what we do in
+    // RegExpObject::matchGlobal(). It's not clear if this is possible, since this loop has
+    // effects. https://bugs.webkit.org/show_bug.cgi?id=158145
+    const maximumReasonableMatchSize = 100000000;
+
+    while (true) {
+        let result = @regExpExec(regexp, str);
+        
+        if (result === null) {
+            if (resultList.length === 0)
+                return null;
+            return resultList;
+        }
+
+        if (resultList.length > maximumReasonableMatchSize)
+            @throwOutOfMemoryError();
+
+        if (!@isObject(result))
+            @throwTypeError("RegExp.prototype.@@match call to RegExp.exec didn't return null or an object");
+
+        let resultString = @toString(result[0]);
+
+        if (!resultString.length)
+            regexp.lastIndex = @advanceStringIndex(str, regexp.lastIndex, unicode);
+
+        resultList.@push(resultString);
+    }
+}
+
+function replace(strArg, replace)
+{
+    "use strict";
+
+    function getSubstitution(matched, str, position, captures, replacement)
+    {
+        "use strict";
+
+        let matchLength = matched.length;
+        let stringLength = str.length;
+        let tailPos = position + matchLength;
+        let m = captures.length;
+        let replacementLength = replacement.length;
+        let result = "";
+        let lastStart = 0;
+
+        for (let start = 0; start = replacement.indexOf("$", lastStart), start !== -1; lastStart = start) {
+            if (start - lastStart > 0)
+                result = result + replacement.substring(lastStart, start);
+            start++;
+            let ch = replacement.charAt(start);
+            if (ch === "")
+                result = result + "$";
+            else {
+                switch (ch)
+                {
+                case "$":
+                    result = result + "$";
+                    start++;
+                    break;
+                case "&":
+                    result = result + matched;
+                    start++;
+                    break;
+                case "`":
+                    if (position > 0)
+                        result = result + str.substring(0, position);
+                    start++;
+                    break;
+                case "'":
+                    if (tailPos < stringLength)
+                        result = result + str.substring(tailPos);
+                    start++;
+                    break;
+                default:
+                    let chCode = ch.charCodeAt(0);
+                    if (chCode >= 0x30 && chCode <= 0x39) {
+                        start++;
+                        let n = chCode - 0x30;
+                        if (n > m)
+                            break;
+                        if (start < replacementLength) {
+                            let nextChCode = replacement.charCodeAt(start);
+                            if (nextChCode >= 0x30 && nextChCode <= 0x39) {
+                                let nn = 10 * n + nextChCode - 0x30;
+                                if (nn <= m) {
+                                    n = nn;
+                                    start++;
+                                }
+                            }
+                        }
+
+                        if (n == 0)
+                            break;
+
+                        if (captures[n] != @undefined)
+                            result = result + captures[n];
+                    } else
+                        result = result + "$";
+                    break;
+                }
+            }
+        }
+
+        return result + replacement.substring(lastStart);
+    }
+
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@replace requires that |this| be an Object");
+
+    let regexp = this;
+
+    let str = @toString(strArg);
+    let stringLength = str.length;
+    let functionalReplace = typeof replace === 'function';
+
+    if (!functionalReplace)
+        replace = @toString(replace);
+
+    let global = regexp.global;
+    let unicode = false;
+
+    if (global) {
+        unicode = regexp.unicode;
+        regexp.lastIndex = 0;
+    }
+
+    let resultList = [];
+    let result;
+    let done = false;
+    while (!done) {
+        result = @regExpExec(regexp, str);
+
+        if (result === null)
+            done = true;
+        else {
+            resultList.@push(result);
+            if (!global)
+                done = true;
+            else {
+                let matchStr = @toString(result[0]);
+
+                if (!matchStr.length)
+                    regexp.lastIndex = @advanceStringIndex(str, regexp.lastIndex, unicode);
+            }
+        }
+    }
+
+    let accumulatedResult = "";
+    let nextSourcePosition = 0;
+    let lastPosition = 0;
+
+    for (result of resultList) {
+        let nCaptures = result.length - 1;
+        if (nCaptures < 0)
+            nCaptures = 0;
+        let matched = @toString(result[0]);
+        let matchLength = matched.length;
+        let position = result.index;
+        position = (position > stringLength) ? stringLength : position;
+        position = (position < 0) ? 0 : position;
+
+        let captures = [];
+        for (let n = 1; n <= nCaptures; n++) {
+            let capN = result[n];
+            if (capN !== @undefined)
+                capN = @toString(capN);
+            captures[n] = capN;
+        }
+
+        let replacement;
+
+        if (functionalReplace) {
+            let replacerArgs = [ matched ].concat(captures.slice(1));
+            replacerArgs.@push(position);
+            replacerArgs.@push(str);
+
+            let replValue = replace.@apply(@undefined, replacerArgs);
+            replacement = @toString(replValue);
+        } else
+            replacement = getSubstitution(matched, str, position, captures, replace);
+
+        if (position >= nextSourcePosition && position >= lastPosition) {
+            accumulatedResult = accumulatedResult + str.substring(nextSourcePosition, position) + replacement;
+            nextSourcePosition = position + matchLength;
+            lastPosition = position;
+        }
+    }
+
+    if (nextSourcePosition >= stringLength)
+        return accumulatedResult;
+
+    return accumulatedResult + str.substring(nextSourcePosition);
+}
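+
+// Usage sketch (assuming a standard global environment): with a string
+// replacement, getSubstitution above expands the usual $-patterns.
+//
+//     /(\d+)-(\d+)/[Symbol.replace]("10-20", "$2-$1"); // "20-10"
+//     /b/[Symbol.replace]("abc", "[$&][$`][$']");      // "a[b][a][c]c"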
+
+// 21.2.5.9 RegExp.prototype[@@search] (string)
+function search(strArg)
+{
+    "use strict";
+
+    let regexp = this;
+
+    // Check for observable side effects and call the fast path if there aren't any.
+    if (@isRegExpObject(regexp) && @tryGetById(regexp, "exec") === @regExpBuiltinExec)
+        return @regExpSearchFast.@call(regexp, strArg);
+
+    // 1. Let rx be the this value.
+    // 2. If Type(rx) is not Object, throw a TypeError exception.
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@search requires that |this| be an Object");
+
+    // 3. Let S be ? ToString(string).
+    let str = @toString(strArg);
+
+    // 4. Let previousLastIndex be ? Get(rx, "lastIndex").
+    let previousLastIndex = regexp.lastIndex;
+    // 5. Perform ? Set(rx, "lastIndex", 0, true).
+    regexp.lastIndex = 0;
+    // 6. Let result be ? RegExpExec(rx, S).
+    let result = @regExpExec(regexp, str);
+    // 7. Perform ? Set(rx, "lastIndex", previousLastIndex, true).
+    regexp.lastIndex = previousLastIndex;
+    // 8. If result is null, return -1.
+    if (result === null)
+        return -1;
+    // 9. Return ? Get(result, "index").
+    return result.index;
+}
+
+@globalPrivate
+function hasObservableSideEffectsForRegExpSplit(regexp) {
+    // This is accessed by the RegExpExec internal function.
+    let regexpExec = @tryGetById(regexp, "exec");
+    if (regexpExec !== @regExpBuiltinExec)
+        return true;
+    
+    // This is accessed by step 5 below.
+    let regexpFlags = @tryGetById(regexp, "flags");
+    if (regexpFlags !== @regExpProtoFlagsGetter)
+        return true;
+    
+    // These are accessed by the builtin flags getter.
+    let regexpGlobal = @tryGetById(regexp, "global");
+    if (regexpGlobal !== @regExpProtoGlobalGetter)
+        return true;
+    let regexpIgnoreCase = @tryGetById(regexp, "ignoreCase");
+    if (regexpIgnoreCase !== @regExpProtoIgnoreCaseGetter)
+        return true;
+    let regexpMultiline = @tryGetById(regexp, "multiline");
+    if (regexpMultiline !== @regExpProtoMultilineGetter)
+        return true;
+    let regexpSticky = @tryGetById(regexp, "sticky");
+    if (regexpSticky !== @regExpProtoStickyGetter)
+        return true;
+    let regexpUnicode = @tryGetById(regexp, "unicode");
+    if (regexpUnicode !== @regExpProtoUnicodeGetter)
+        return true;
+    
+    // This is accessed by the RegExp species constructor.
+    let regexpSource = @tryGetById(regexp, "source");
+    if (regexpSource !== @regExpProtoSourceGetter)
+        return true;
+    
+    return !@isRegExpObject(regexp);
+}
+
+// ES 21.2.5.11 RegExp.prototype[@@split](string, limit)
+function split(string, limit)
+{
+    "use strict";
+
+    // 1. Let rx be the this value.
+    // 2. If Type(rx) is not Object, throw a TypeError exception.
+    if (!@isObject(this))
+        @throwTypeError("RegExp.prototype.@@split requires that |this| be an Object");
+    let regexp = this;
+
+    // 3. Let S be ? ToString(string).
+    let str = @toString(string);
+
+    // 4. Let C be ? SpeciesConstructor(rx, %RegExp%).
+    let speciesConstructor = @speciesConstructor(regexp, @RegExp);
+
+    if (speciesConstructor === @RegExp && !@hasObservableSideEffectsForRegExpSplit(regexp))
+        return @regExpSplitFast.@call(regexp, str, limit);
+
+    // 5. Let flags be ? ToString(? Get(rx, "flags")).
+    let flags = @toString(regexp.flags);
+
+    // 6. If flags contains "u", let unicodeMatching be true.
+    // 7. Else, let unicodeMatching be false.
+    let unicodeMatching = @stringIncludesInternal.@call(flags, "u");
+    // 8. If flags contains "y", let newFlags be flags.
+    // 9. Else, let newFlags be the string that is the concatenation of flags and "y".
+    let newFlags = @stringIncludesInternal.@call(flags, "y") ? flags : flags + "y";
+
+    // 10. Let splitter be ? Construct(C, « rx, newFlags »).
+    let splitter = new speciesConstructor(regexp, newFlags);
+
+    // We need to check again for RegExp subclasses that will fail the speciesConstructor test
+    // but can still use the fast path after we invoke the constructor above.
+    if (!@hasObservableSideEffectsForRegExpSplit(splitter))
+        return @regExpSplitFast.@call(splitter, str, limit);
+
+    // 11. Let A be ArrayCreate(0).
+    // 12. Let lengthA be 0.
+    let result = [];
+
+    // 13. If limit is undefined, let lim be 2^32-1; else let lim be ? ToUint32(limit).
+    limit = (limit === @undefined) ? 0xffffffff : limit >>> 0;
+
+    // 16. If lim = 0, return A.
+    if (!limit)
+        return result;
+
+    // 14. [Deferred from above] Let size be the number of elements in S.
+    let size = str.length;
+
+    // 17. If size = 0, then
+    if (!size) {
+        // a. Let z be ? RegExpExec(splitter, S).
+        let z = @regExpExec(splitter, str);
+        // b. If z is not null, return A.
+        if (z != null)
+            return result;
+        // c. Perform ! CreateDataProperty(A, "0", S).
+        @putByValDirect(result, 0, str);
+        // d. Return A.
+        return result;
+    }
+
+    // 15. [Deferred from above] Let p be 0.
+    let position = 0;
+    // 18. Let q be p.
+    let matchPosition = 0;
+
+    // 19. Repeat, while q < size
+    while (matchPosition < size) {
+        // a. Perform ? Set(splitter, "lastIndex", q, true).
+        splitter.lastIndex = matchPosition;
+        // b. Let z be ? RegExpExec(splitter, S).
+        let matches = @regExpExec(splitter, str);
+        // c. If z is null, let q be AdvanceStringIndex(S, q, unicodeMatching).
+        if (matches === null)
+            matchPosition = @advanceStringIndex(str, matchPosition, unicodeMatching);
+        // d. Else z is not null,
+        else {
+            // i. Let e be ? ToLength(? Get(splitter, "lastIndex")).
+            let endPosition = @toLength(splitter.lastIndex);
+            // ii. Let e be min(e, size).
+            endPosition = (endPosition <= size) ? endPosition : size;
+            // iii. If e = p, let q be AdvanceStringIndex(S, q, unicodeMatching).
+            if (endPosition === position)
+                matchPosition = @advanceStringIndex(str, matchPosition, unicodeMatching);
+            // iv. Else e != p,
+            else {
+                // 1. Let T be a String value equal to the substring of S consisting of the elements at indices p (inclusive) through q (exclusive).
+                let subStr = @stringSubstrInternal.@call(str, position, matchPosition - position);
+                // 2. Perform ! CreateDataProperty(A, ! ToString(lengthA), T).
+                // 3. Let lengthA be lengthA + 1.
+                @putByValDirect(result, result.length, subStr);
+                // 4. If lengthA = lim, return A.
+                if (result.length == limit)
+                    return result;
+
+                // 5. Let p be e.
+                position = endPosition;
+                // 6. Let numberOfCaptures be ? ToLength(? Get(z, "length")).
+                // 7. Let numberOfCaptures be max(numberOfCaptures-1, 0).
+                let numberOfCaptures = matches.length > 1 ? matches.length - 1 : 0;
+
+                // 8. Let i be 1.
+                let i = 1;
+                // 9. Repeat, while i <= numberOfCaptures,
+                while (i <= numberOfCaptures) {
+                    // a. Let nextCapture be ? Get(z, ! ToString(i)).
+                    let nextCapture = matches[i];
+                    // b. Perform ! CreateDataProperty(A, ! ToString(lengthA), nextCapture).
+                    // d. Let lengthA be lengthA + 1.
+                    @putByValDirect(result, result.length, nextCapture);
+                    // e. If lengthA = lim, return A.
+                    if (result.length == limit)
+                        return result;
+                    // c. Let i be i + 1.
+                    i++;
+                }
+                // 10. Let q be p.
+                matchPosition = position;
+            }
+        }
+    }
+    // 20. Let T be a String value equal to the substring of S consisting of the elements at indices p (inclusive) through size (exclusive).
+    let remainingStr = @stringSubstrInternal.@call(str, position, size);
+    // 21. Perform ! CreateDataProperty(A, ! ToString(lengthA), T).
+    @putByValDirect(result, result.length, remainingStr);
+    // 22. Return A.
+    return result;
+}
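+
+// Usage sketch (assuming a standard global environment): capture groups are
+// spliced into the output and limit caps the number of elements.
+//
+//     /,/[Symbol.split]("a,b,c", 2); // ["a", "b"]
+//     /(-)/[Symbol.split]("a-b");    // ["a", "-", "b"]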
+
+// ES 21.2.5.13 RegExp.prototype.test(string)
+@intrinsic=RegExpTestIntrinsic
+function test(strArg)
+{
+    "use strict";
+
+    let regexp = this;
+
+    // Check for observable side effects and call the fast path if there aren't any.
+    if (@isRegExpObject(regexp) && @tryGetById(regexp, "exec") === @regExpBuiltinExec)
+        return @regExpTestFast.@call(regexp, strArg);
+
+    // 1. Let R be the this value.
+    // 2. If Type(R) is not Object, throw a TypeError exception.
+    if (!@isObject(regexp))
+        @throwTypeError("RegExp.prototype.test requires that |this| be an Object");
+
+    // 3. Let string be ? ToString(S).
+    let str = @toString(strArg);
+
+    // 4. Let match be ? RegExpExec(R, string).
+    let match = @regExpExec(regexp, str);
+
+    // 5. If match is not null, return true; else return false.
+    if (match !== null)
+        return true;
+    return false;
+}
diff --git a/builtins/SetPrototype.js b/builtins/SetPrototype.js
new file mode 100644
index 0000000..e9b6626
--- /dev/null
+++ b/builtins/SetPrototype.js
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function forEach(callback /*, thisArg */)
+{
+    "use strict";
+
+    if (!@isSet(this))
+        @throwTypeError("Set operation called on non-Set object");
+
+    if (typeof callback !== 'function')
+        @throwTypeError("Set.prototype.forEach callback must be a function");
+
+    var thisArg = @argument(1);
+    var iterator = @SetIterator(this);
+
+    // To avoid allocating an iterator result object on each step, we pass a placeholder array to the special "next" function, which fills in the element value.
+    var value = [ @undefined ];
+    for (;;) {
+        if (@setIteratorNext.@call(iterator, value))
+            break;
+        callback.@call(thisArg, value[0], value[0], this);
+    }
+}
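+
+// Usage sketch (assuming a standard global environment): mirroring
+// Map.prototype.forEach, the callback receives the value twice (as both value
+// and key) plus the set itself.
+//
+//     new Set(["a", "b"]).forEach((value, key, set) => { /* value === key */ });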
diff --git a/builtins/StringConstructor.js b/builtins/StringConstructor.js
new file mode 100644
index 0000000..a329332
--- /dev/null
+++ b/builtins/StringConstructor.js
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function raw(template)
+{
+    "use strict";
+
+    if (template === null || template === @undefined)
+        @throwTypeError("String.raw requires template not be null or undefined");
+    var cookedSegments = @Object(template);
+
+    var rawValue = cookedSegments.raw;
+    if (rawValue === null || rawValue === @undefined)
+        @throwTypeError("String.raw requires template.raw not be null or undefined");
+    var rawSegments = @Object(rawValue);
+
+    var numberOfSubstitutions = arguments.length - 1;
+
+    var segmentCount = @toLength(rawSegments.length);
+
+    if (segmentCount <= 0)
+        return '';
+
+    var stringElements = '';
+    for (var i = 0; ; ++i) {
+        var segment = @toString(rawSegments[i]);
+        stringElements += segment;
+
+        if ((i + 1) === segmentCount)
+            return stringElements;
+
+        if (i < numberOfSubstitutions) {
+            var substitutionIndexInArguments = i + 1;
+            var next = @toString(arguments[substitutionIndexInArguments]);
+            stringElements += next;
+        }
+    }
+}
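+
+// Usage sketch (assuming a standard global environment): the raw segments are
+// interleaved with the substitutions, so escape sequences in a tagged template
+// stay literal.
+//
+//     String.raw({ raw: ["x", "y", "z"] }, "-", "."); // "x-y.z"
+//     String.raw`\n${0}`;                             // backslash, "n", "0" -- three literal characters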
diff --git a/builtins/StringIteratorPrototype.js b/builtins/StringIteratorPrototype.js
new file mode 100644
index 0000000..52762db
--- /dev/null
+++ b/builtins/StringIteratorPrototype.js
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function next()
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("%StringIteratorPrototype%.next requires that |this| not be null or undefined");
+
+    var position = this.@stringIteratorNextIndex;
+    if (position === @undefined)
+        @throwTypeError("%StringIteratorPrototype%.next requires that |this| be a String Iterator instance");
+
+    var done = true;
+    var value = @undefined;
+
+    var string = this.@iteratedString;
+    if (string !== @undefined) {
+        var length = string.length >>> 0;
+        if (position >= length) {
+            this.@iteratedString = @undefined;
+        } else {
+            done = false;
+
+            var first = string.@charCodeAt(position);
+            if (first < 0xD800 || first > 0xDBFF || position + 1 === length)
+                value = string[position];
+            else {
+                var second = string.@charCodeAt(position + 1);
+                if (second < 0xDC00 || second > 0xDFFF)
+                    value = string[position];
+                else
+                    value = string[position] + string[position + 1];
+            }
+
+            this.@stringIteratorNextIndex = position + value.length;
+        }
+    }
+
+    return {done, value};
+}
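+
+// Usage sketch (assuming a standard global environment): iteration walks code
+// points, so a surrogate pair is yielded as a single two-unit string.
+//
+//     [..."a\u{1F600}"]; // ["a", "\u{1F600}"] -- two elements, of lengths 1 and 2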
diff --git a/builtins/StringPrototype.js b/builtins/StringPrototype.js
new file mode 100644
index 0000000..c9cdfcf
--- /dev/null
+++ b/builtins/StringPrototype.js
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2015 Andy VanWagoner .
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function match(regexp)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.match requires that |this| not be null or undefined");
+
+    if (regexp != null) {
+        var matcher = regexp.@matchSymbol;
+        if (matcher != @undefined)
+            return matcher.@call(regexp, this);
+    }
+
+    let thisString = @toString(this);
+    let createdRegExp = @regExpCreate(regexp, @undefined);
+    return createdRegExp.@matchSymbol(thisString);
+}
+
+@globalPrivate
+function repeatSlowPath(string, count)
+{
+    "use strict";
+
+    // Return an empty string.
+    if (count === 0 || string.length === 0)
+        return "";
+
+    // Return the original string.
+    if (count === 1)
+        return string;
+
+    if (string.length * count > @MAX_STRING_LENGTH)
+        @throwOutOfMemoryError();
+
+    // Bit operations on |count| are safe because |count| is within the Int32 range.
+    // Repeat log(N) times to generate the repeated string rope.
+    var result = "";
+    var operand = string;
+    while (true) {
+        if (count & 1)
+            result += operand;
+        count >>= 1;
+        if (!count)
+            return result;
+        operand += operand;
+    }
+}
+
+@globalPrivate
+function repeatCharactersSlowPath(string, count)
+{
+    "use strict";
+    var repeatCount = (count / string.length) | 0;
+    var remainingCharacters = count - repeatCount * string.length;
+    var result = "";
+    var operand = string;
+    // Bit operations on |repeatCount| are safe because |repeatCount| is within the Int32 range.
+    // Repeat log(N) times to generate the repeated string rope.
+    while (true) {
+        if (repeatCount & 1)
+            result += operand;
+        repeatCount >>= 1;
+        if (!repeatCount)
+            break;
+        operand += operand;
+    }
+    if (remainingCharacters)
+        result += @stringSubstrInternal.@call(string, 0, remainingCharacters);
+    return result;
+}
+
+
+function repeat(count)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.repeat requires that |this| not be null or undefined");
+
+    var string = @toString(this);
+    count = @toInteger(count);
+
+    if (count < 0 || count === @Infinity)
+        @throwRangeError("String.prototype.repeat argument must be greater than or equal to 0 and not be Infinity");
+
+    if (string.length === 1)
+        return @repeatCharacter(string, count);
+
+    return @repeatSlowPath(string, count);
+}
+
+function padStart(maxLength/*, fillString*/)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.padStart requires that |this| not be null or undefined");
+
+    var string = @toString(this);
+    maxLength = @toLength(maxLength);
+
+    var stringLength = string.length;
+    if (maxLength <= stringLength)
+        return string;
+
+    var filler;
+    var fillString = @argument(1);
+    if (fillString === @undefined)
+        filler = " ";
+    else {
+        filler = @toString(fillString);
+        if (filler === "")
+            return string;
+    }
+
+    if (maxLength > @MAX_STRING_LENGTH)
+        @throwOutOfMemoryError();
+
+    var fillLength = maxLength - stringLength;
+    var truncatedStringFiller;
+
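+    // Build the fill by repeating the filler until it covers fillLength characters; a
+    // single-character filler takes the fast path, longer fillers go through the slow
+    // path, which also truncates the final partial repetition.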
+    if (filler.length === 1)
+        truncatedStringFiller = @repeatCharacter(filler, fillLength);
+    else
+        truncatedStringFiller = @repeatCharactersSlowPath(filler, fillLength);
+    return truncatedStringFiller + string;
+}
+
+function padEnd(maxLength/*, fillString*/)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.padEnd requires that |this| not be null or undefined");
+
+    var string = @toString(this);
+    maxLength = @toLength(maxLength);
+
+    var stringLength = string.length;
+    if (maxLength <= stringLength)
+        return string;
+
+    var filler;
+    var fillString = @argument(1);
+    if (fillString === @undefined)
+        filler = " ";
+    else {
+        filler = @toString(fillString);
+        if (filler === "")
+            return string;
+    }
+
+    if (maxLength > @MAX_STRING_LENGTH)
+        @throwOutOfMemoryError();
+
+    var fillLength = maxLength - stringLength;
+    var truncatedStringFiller;
+
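+    // Same filler construction as padStart above; only the concatenation order differs.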
+    if (filler.length === 1)
+        truncatedStringFiller = @repeatCharacter(filler, fillLength);
+    else
+        truncatedStringFiller = @repeatCharactersSlowPath(filler, fillLength);
+    return string + truncatedStringFiller;
+}
+
+@globalPrivate
+function hasObservableSideEffectsForStringReplace(regexp, replacer) {
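+    // replace() below may only take the private @replaceUsingRegExp fast path when Symbol.replace,
+    // exec, and the global/unicode getters are all still the original RegExp built-ins and |regexp|
+    // really is a RegExp object; any user override would make the side effects observable.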
+    if (replacer !== @regExpPrototypeSymbolReplace)
+        return true;
+    
+    let regexpExec = @tryGetById(regexp, "exec");
+    if (regexpExec !== @regExpBuiltinExec)
+        return true;
+
+    let regexpGlobal = @tryGetById(regexp, "global");
+    if (regexpGlobal !== @regExpProtoGlobalGetter)
+        return true;
+
+    let regexpUnicode = @tryGetById(regexp, "unicode");
+    if (regexpUnicode !== @regExpProtoUnicodeGetter)
+        return true;
+
+    return !@isRegExpObject(regexp);
+}
+
+@intrinsic=StringPrototypeReplaceIntrinsic
+function replace(search, replace)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.replace requires that |this| not be null or undefined");
+
+    if (search != null) {
+        let replacer = search.@replaceSymbol;
+        if (replacer !== @undefined) {
+            if (!@hasObservableSideEffectsForStringReplace(search, replacer))
+                return @toString(this).@replaceUsingRegExp(search, replace);
+            return replacer.@call(search, this, replace);
+        }
+    }
+
+    let thisString = @toString(this);
+    let searchString = @toString(search);
+    return thisString.@replaceUsingStringSearch(searchString, replace);
+}
+    
+function localeCompare(that/*, locales, options */)
+{
+    "use strict";
+
+    // 13.1.1 String.prototype.localeCompare (that [, locales [, options ]]) (ECMA-402 2.0)
+    // http://ecma-international.org/publications/standards/Ecma-402.htm
+
+    // 1. Let O be RequireObjectCoercible(this value).
+    if (this == null)
+        @throwTypeError("String.prototype.localeCompare requires that |this| not be null or undefined");
+
+    // 2. Let S be ToString(O).
+    // 3. ReturnIfAbrupt(S).
+    var thisString = @toString(this);
+
+    // 4. Let That be ToString(that).
+    // 5. ReturnIfAbrupt(That).
+    var thatString = @toString(that);
+
+    // Avoid creating a collator for defaults.
+    var locales = @argument(1);
+    var options = @argument(2);
+    if (locales === @undefined && options === @undefined)
+        return @Collator.prototype.compare(thisString, thatString);
+
+    // 6. Let collator be Construct(%Collator%, «locales, options»).
+    // 7. ReturnIfAbrupt(collator).
+    var collator = new @Collator(locales, options);
+
+    // 8. Return CompareStrings(collator, S, That).
+    return collator.compare(thisString, thatString);
+}
+
+function search(regexp)
+{
+    "use strict";
+
+    if (this == null)
+        @throwTypeError("String.prototype.search requires that |this| not be null or undefined");
+
+    if (regexp != null) {
+        var searcher = regexp.@searchSymbol;
+        if (searcher != @undefined)
+            return searcher.@call(regexp, this);
+    }
+
+    var thisString = @toString(this);
+    var createdRegExp = @regExpCreate(regexp, @undefined);
+    return createdRegExp.@searchSymbol(thisString);
+}
+
+function split(separator, limit)
+{
+    "use strict";
+    
+    if (this == null)
+        @throwTypeError("String.prototype.split requires that |this| not be null or undefined");
+    
+    if (separator != null) {
+        var splitter = separator.@splitSymbol;
+        if (splitter != @undefined)
+            return splitter.@call(separator, this, limit);
+    }
+    
+    return @stringSplitFast.@call(this, separator, limit);
+}
diff --git a/builtins/TypedArrayConstructor.js b/builtins/TypedArrayConstructor.js
new file mode 100644
index 0000000..54a957b
--- /dev/null
+++ b/builtins/TypedArrayConstructor.js
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// According to the spec we are supposed to crawl the prototype chain looking
+// for a TypedArray constructor. The way we implement this is with a
+// private function, @allocateTypedArray, on each of the prototypes.
+// This enables us to optimize this lookup in the inline cache.
+
+function of(/* items... */)
+{
+    "use strict";
+    let len = arguments.length;
+    let constructFunction = this.@allocateTypedArray;
+    if (constructFunction === @undefined)
+        @throwTypeError("TypedArray.of requires its this argument to subclass a TypedArray constructor");
+
+    let result = constructFunction(len);
+
+    for (let i = 0; i < len; i++)
+        result[i] = arguments[i];
+
+    return result;
+}
+
+function from(items /* [ , mapfn [ , thisArg ] ] */)
+{
+    "use strict";
+
+    let mapFn = @argument(1);
+
+    let thisArg;
+
+    if (mapFn !== @undefined) {
+        if (typeof mapFn !== "function")
+            @throwTypeError("TypedArray.from requires that the second argument, when provided, be a function");
+
+        thisArg = @argument(2);
+    }
+
+    if (items == null)
+        @throwTypeError("TypedArray.from requires an array-like object - not null or undefined");
+
+    let iteratorMethod = items.@iteratorSymbol;
+    if (iteratorMethod != null) {
+        if (typeof iteratorMethod !== "function")
+            @throwTypeError("TypedArray.from requires that the property of the first argument, items[Symbol.iterator], when exists, be a function");
+
+        let accumulator = [];
+
+        let k = 0;
+        let iterator = iteratorMethod.@call(items);
+
+        // Since a for-of loop looks up the @@iterator property of its iterable again,
+        // that second lookup would be observable if the user defined a getter for @@iterator.
+        // To avoid this, we define a wrapper object whose @@iterator just returns the iterator we already have.
+        let wrapper = {};
+        wrapper.@iteratorSymbol = function() { return iterator; };
+
+        for (let value of wrapper) {
+            if (mapFn)
+                @putByValDirect(accumulator, k, thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k));
+            else
+                @putByValDirect(accumulator, k, value);
+            k++;
+        }
+
+        let constructFunction = this.@allocateTypedArray;
+        if (constructFunction === @undefined)
+            @throwTypeError("TypedArray.from requires its this argument subclass a TypedArray constructor");
+
+        let result = constructFunction(k);
+
+        for (let i = 0; i < k; i++) 
+            result[i] = accumulator[i];
+
+
+        return result;
+    }
+
+    let arrayLike = @Object(items);
+    let arrayLikeLength = @toLength(arrayLike.length);
+
+    let constructFunction = this.@allocateTypedArray;
+    if (constructFunction === @undefined)
+        @throwTypeError("this does not subclass a TypedArray constructor");
+
+    let result = constructFunction(arrayLikeLength);
+
+    let k = 0;
+    while (k < arrayLikeLength) {
+        let value = arrayLike[k];
+        if (mapFn)
+            result[k] = thisArg === @undefined ? mapFn(value, k) : mapFn.@call(thisArg, value, k);
+        else
+            result[k] = value;
+        k++;
+    }
+
+    return result;
+}
+
+function allocateInt8Array(length)
+{
+    return new @Int8Array(length);
+}
+
+function allocateInt16Array(length)
+{
+    return new @Int16Array(length);    
+}
+
+function allocateInt32Array(length)
+{
+    return new @Int32Array(length);   
+}
+
+function allocateUint32Array(length)
+{
+    return new @Uint32Array(length);
+}
+
+function allocateUint16Array(length)
+{
+    return new @Uint16Array(length);   
+}
+
+function allocateUint8Array(length)
+{
+    return new @Uint8Array(length);   
+}
+
+function allocateUint8ClampedArray(length)
+{
+    return new @Uint8ClampedArray(length);
+}
+
+function allocateFloat32Array(length)
+{
+    return new @Float32Array(length);
+}
+
+function allocateFloat64Array(length)
+{
+    return new @Float64Array(length);
+}
diff --git a/builtins/TypedArrayPrototype.js b/builtins/TypedArrayPrototype.js
new file mode 100644
index 0000000..53674bf
--- /dev/null
+++ b/builtins/TypedArrayPrototype.js
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Note that the intrinsic @typedArrayLength checks that the argument passed is a typed array
+// and throws if it is not.
+
+
+// Typed Arrays have their own species constructor function since they need
+// to look up their default constructor, which is expensive. If we used the
+// normal speciesConstructor helper we would need to look up the default
+// constructor every time.
+@globalPrivate
+function typedArraySpeciesConstructor(value)
+{
+    "use strict";
+    let constructor = value.constructor;
+    if (constructor === @undefined)
+        return @typedArrayGetOriginalConstructor(value);
+
+    if (!@isObject(constructor))
+        @throwTypeError("|this|.constructor is not an Object or undefined");
+
+    constructor = constructor.@speciesSymbol;
+    if (constructor == null)
+        return @typedArrayGetOriginalConstructor(value);
+    // The lack of an @isConstructor(constructor) check here is not observable because
+    // the first thing we will do with the value is attempt to construct the result with it.
+    // If any user of this function does not immediately construct the result they need to
+    // verify that the result is a constructor.
+    return constructor;
+}
+
+@globalPrivate
+function typedArrayClampArgumentToStartOrEnd(value, length, undefinedValue)
+{
+    "use strict";
+
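+    // Clamps a relative start/end argument into [0, length]: undefined yields the caller's
+    // default, and negative values count back from the end of the array.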
+    if (value === @undefined)
+        return undefinedValue;
+
+    let int = @toInteger(value);
+    if (int < 0) {
+        int += length;
+        return int < 0 ? 0 : int;
+    }
+    return int > length ? length : int;
+}
+
+function values()
+{
+    "use strict";
+    @typedArrayLength(this);
+    return new @createArrayIterator(this, "value", @arrayIteratorValueNext);
+}
+
+function keys()
+{
+    "use strict";
+    @typedArrayLength(this);
+    return new @createArrayIterator(this, "key", @arrayIteratorKeyNext);
+}
+
+function entries()
+{
+    "use strict";
+    @typedArrayLength(this);
+    return new @createArrayIterator(this, "key+value", @arrayIteratorKeyValueNext);
+}
+
+function every(callback /*, thisArg */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.every callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        if (!callback.@call(thisArg, this[i], i, this))
+            return false;
+    }
+
+    return true;
+}
+
+function fill(value /* [, start [, end]] */)
+{
+    "use strict";
+
+    let length = @typedArrayLength(this);
+
+    let start = @argument(1);
+    let end = @argument(2);
+
+    start = @typedArrayClampArgumentToStartOrEnd(start, length, 0);
+    end = @typedArrayClampArgumentToStartOrEnd(end, length, length);
+
+    for (let i = start; i < end; i++)
+        this[i] = value;
+    return this;
+}
+
+function find(callback /* [, thisArg] */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.find callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        let elem = this[i];
+        if (callback.@call(thisArg, elem, i, this))
+            return elem;
+    }
+    return @undefined;
+}
+
+function findIndex(callback /* [, thisArg] */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.findIndex callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        if (callback.@call(thisArg, this[i], i, this))
+            return i;
+    }
+    return -1;
+}
+
+function forEach(callback /* [, thisArg] */)
+{
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.forEach callback must be a function");
+
+    for (var i = 0; i < length; i++)
+        callback.@call(thisArg, this[i], i, this);
+}
+
+function some(callback /* [, thisArg] */)
+{
+    // 22.2.3.24
+    "use strict";
+    var length = @typedArrayLength(this);
+    var thisArg = @argument(1);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.some callback must be a function");
+
+    for (var i = 0; i < length; i++) {
+        if (callback.@call(thisArg, this[i], i, this))
+            return true;
+    }
+
+    return false;
+}
+
+function sort(comparator)
+{
+    // 22.2.3.25
+    "use strict";
+
+    function min(a, b)
+    {
+        return a < b ? a : b;
+    }
+
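+    // Merges two adjacent sorted runs of at most |width| elements from src into dst;
+    // ties keep the left element first, so the sort is stable.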
+    function merge(dst, src, srcIndex, srcEnd, width, comparator)
+    {
+        var left = srcIndex;
+        var leftEnd = min(left + width, srcEnd);
+        var right = leftEnd;
+        var rightEnd = min(right + width, srcEnd);
+
+        for (var dstIndex = left; dstIndex < rightEnd; ++dstIndex) {
+            if (right < rightEnd) {
+                if (left >= leftEnd || comparator(src[right], src[left]) < 0) {
+                    dst[dstIndex] = src[right++];
+                    continue;
+                }
+            }
+
+            dst[dstIndex] = src[left++];
+        }
+    }
+
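+    // Bottom-up merge sort: double the run width each pass and ping-pong between the
+    // array and a temporary buffer, copying back if the final result landed in the buffer.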
+    function mergeSort(array, valueCount, comparator)
+    {
+        var buffer = [ ];
+        buffer.length = valueCount;
+
+        var dst = buffer;
+        var src = array;
+
+        for (var width = 1; width < valueCount; width *= 2) {
+            for (var srcIndex = 0; srcIndex < valueCount; srcIndex += 2 * width)
+                merge(dst, src, srcIndex, valueCount, width, comparator);
+
+            var tmp = src;
+            src = dst;
+            dst = tmp;
+        }
+
+        if (src != array) {
+            for(var i = 0; i < valueCount; i++)
+                array[i] = src[i];
+        }
+    }
+
+    var length = @typedArrayLength(this);
+
+    if (length < 2)
+        return;
+
+    if (typeof comparator == "function")
+        mergeSort(this, length, comparator);
+    else
+        @typedArraySort(this);
+    
+    return this;
+}
+
+function subarray(begin, end)
+{
+    "use strict";
+
+    if (!@isTypedArrayView(this))
+        @throwTypeError("|this| should be a typed array view");
+
+    let start = @toInteger(begin);
+    let finish;
+    if (end !== @undefined)
+        finish = @toInteger(end);
+
+    let constructor = @typedArraySpeciesConstructor(this);
+
+    return @typedArraySubarrayCreate.@call(this, start, finish, constructor);
+}
+
+function reduce(callback /* [, initialValue] */)
+{
+    // 22.2.3.19
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.reduce callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("TypedArray.prototype.reduce of empty array with no initial value");
+
+    var accumulator, k = 0;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else
+        accumulator = this[k++];
+
+    for (; k < length; k++)
+        accumulator = callback.@call(@undefined, accumulator, this[k], k, this);
+
+    return accumulator;
+}
+
+function reduceRight(callback /* [, initialValue] */)
+{
+    // 22.2.3.20
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.reduceRight callback must be a function");
+
+    var argumentCount = @argumentCount();
+    if (length === 0 && argumentCount < 2)
+        @throwTypeError("TypedArray.prototype.reduceRight of empty array with no initial value");
+
+    var accumulator, k = length - 1;
+    if (argumentCount > 1)
+        accumulator = @argument(1);
+    else
+        accumulator = this[k--];
+
+    for (; k >= 0; k--)
+        accumulator = callback.@call(@undefined, accumulator, this[k], k, this);
+
+    return accumulator;
+}
+
+function map(callback /*, thisArg */)
+{
+    // 22.2.3.18
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.map callback must be a function");
+
+    var thisArg = @argument(1);
+
+    // Do species construction
+    var constructor = this.constructor;
+    var result;
+    if (constructor === @undefined)
+        result = new (@typedArrayGetOriginalConstructor(this))(length);
+    else {
+        var speciesConstructor = @Object(constructor).@speciesSymbol;
+        if (speciesConstructor === null || speciesConstructor === @undefined)
+            result = new (@typedArrayGetOriginalConstructor(this))(length);
+        else {
+            result = new speciesConstructor(length);
+            // typedArrayLength throws if it doesn't get a view.
+            @typedArrayLength(result);
+        }
+    }
+
+    for (var i = 0; i < length; i++) {
+        var mappedValue = callback.@call(thisArg, this[i], i, this);
+        result[i] = mappedValue;
+    }
+    return result;
+}
+
+function filter(callback /*, thisArg */)
+{
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (typeof callback !== "function")
+        @throwTypeError("TypedArray.prototype.filter callback must be a function");
+
+    var thisArg = @argument(1);
+    var kept = [];
+
+    for (var i = 0; i < length; i++) {
+        var value = this[i];
+        if (callback.@call(thisArg, value, i, this))
+            kept.@push(value);
+    }
+
+    var constructor = this.constructor;
+    var result;
+    var resultLength = kept.length;
+    if (constructor === @undefined)
+        result = new (@typedArrayGetOriginalConstructor(this))(resultLength);
+    else {
+        var speciesConstructor = @Object(constructor).@speciesSymbol;
+        if (speciesConstructor === null || speciesConstructor === @undefined)
+            result = new (@typedArrayGetOriginalConstructor(this))(resultLength);
+        else {
+            result = new speciesConstructor(resultLength);
+            // typedArrayLength throws if it doesn't get a view.
+            @typedArrayLength(result);
+        }
+    }
+
+    for (var i = 0; i < kept.length; i++)
+        result[i] = kept[i];
+
+    return result;
+}
+
+function toLocaleString()
+{
+    "use strict";
+
+    var length = @typedArrayLength(this);
+
+    if (length == 0)
+        return "";
+
+    var string = this[0].toLocaleString();
+    for (var i = 1; i < length; i++)
+        string += "," + this[i].toLocaleString();
+
+    return string;
+}
diff --git a/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp b/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
new file mode 100644
index 0000000..2fd7031
--- /dev/null
+++ b/bytecode/AdaptiveInferredPropertyValueWatchpointBase.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AdaptiveInferredPropertyValueWatchpointBase.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+AdaptiveInferredPropertyValueWatchpointBase::AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition& key)
+    : m_key(key)
+{
+    RELEASE_ASSERT(key.kind() == PropertyCondition::Equivalence);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::install()
+{
+    RELEASE_ASSERT(m_key.isWatchable());
+
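+    // Watch both the structure (for transitions away from the watchable shape) and the
+    // property's replacement watchpoint set (for writes that change the inferred value).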
+    m_key.object()->structure()->addTransitionWatchpoint(&m_structureWatchpoint);
+
+    PropertyOffset offset = m_key.object()->structure()->getConcurrently(m_key.uid());
+    WatchpointSet* set = m_key.object()->structure()->propertyReplacementWatchpointSet(offset);
+    set->add(&m_propertyWatchpoint);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::fire(const FireDetail& detail)
+{
+    // One of the watchpoints fired, but the other one didn't. Make sure that neither of them is
+    // in any set anymore. This simplifies things by allowing us to reinstall the watchpoints
+    // from scratch wherever needed.
+    if (m_structureWatchpoint.isOnList())
+        m_structureWatchpoint.remove();
+    if (m_propertyWatchpoint.isOnList())
+        m_propertyWatchpoint.remove();
+
+    if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+        install();
+        return;
+    }
+
+    handleFire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::StructureWatchpoint::fireInternal(const FireDetail& detail)
+{
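+    // The watchpoint is embedded in its owner, so recover the owning object by subtracting
+    // the member's offset from |this| and forward the fire notification to it.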
+    ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_structureWatchpoint);
+
+    AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+
+    parent->fire(detail);
+}
+
+void AdaptiveInferredPropertyValueWatchpointBase::PropertyWatchpoint::fireInternal(const FireDetail& detail)
+{
+    ptrdiff_t myOffset = OBJECT_OFFSETOF(AdaptiveInferredPropertyValueWatchpointBase, m_propertyWatchpoint);
+
+    AdaptiveInferredPropertyValueWatchpointBase* parent = bitwise_cast<AdaptiveInferredPropertyValueWatchpointBase*>(bitwise_cast<char*>(this) - myOffset);
+    
+    parent->fire(detail);
+}
+    
+} // namespace JSC
diff --git a/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h b/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
new file mode 100644
index 0000000..410a93f
--- /dev/null
+++ b/bytecode/AdaptiveInferredPropertyValueWatchpointBase.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class AdaptiveInferredPropertyValueWatchpointBase {
+    WTF_MAKE_NONCOPYABLE(AdaptiveInferredPropertyValueWatchpointBase);
+    WTF_MAKE_FAST_ALLOCATED;
+
+public:
+    AdaptiveInferredPropertyValueWatchpointBase(const ObjectPropertyCondition&);
+
+    const ObjectPropertyCondition& key() const { return m_key; }
+
+    void install();
+
+    virtual ~AdaptiveInferredPropertyValueWatchpointBase() = default;
+
+protected:
+    virtual void handleFire(const FireDetail&) = 0;
+
+private:
+    class StructureWatchpoint : public Watchpoint {
+    public:
+        StructureWatchpoint() { }
+    protected:
+        void fireInternal(const FireDetail&) override;
+    };
+    class PropertyWatchpoint : public Watchpoint {
+    public:
+        PropertyWatchpoint() { }
+    protected:
+        void fireInternal(const FireDetail&) override;
+    };
+
+    void fire(const FireDetail&);
+
+    ObjectPropertyCondition m_key;
+    StructureWatchpoint m_structureWatchpoint;
+    PropertyWatchpoint m_propertyWatchpoint;
+};
+
+} // namespace JSC
diff --git a/bytecode/ArithProfile.cpp b/bytecode/ArithProfile.cpp
new file mode 100644
index 0000000..1fa7c79
--- /dev/null
+++ b/bytecode/ArithProfile.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ArithProfile.h"
+
+#include "CCallHelpers.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
+{
+    if (!shouldEmitSetDouble() && !shouldEmitSetNonNumber())
+        return;
+
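+    // Emitted fast path: Int32 results skip profiling entirely; non-Int32 doubles set the
+    // double-observation bits, and anything else sets the NonNumber bit.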
+    CCallHelpers::Jump isInt32 = jit.branchIfInt32(regs, mode);
+    CCallHelpers::Jump notDouble = jit.branchIfNotDoubleKnownNotInt32(regs, mode);
+    emitSetDouble(jit);
+    CCallHelpers::Jump done = jit.jump();
+    notDouble.link(&jit);
+    emitSetNonNumber(jit);
+    done.link(&jit);
+    isInt32.link(&jit);
+}
+
+bool ArithProfile::shouldEmitSetDouble() const
+{
+    uint32_t mask = ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble;
+    return (m_bits & mask) != mask;
+}
+
+void ArithProfile::emitSetDouble(CCallHelpers& jit) const
+{
+    if (shouldEmitSetDouble())
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int32Overflow | ArithProfile::Int52Overflow | ArithProfile::NegZeroDouble | ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(addressOfBits()));
+}
+
+bool ArithProfile::shouldEmitSetNonNumber() const
+{
+    uint32_t mask = ArithProfile::NonNumber;
+    return (m_bits & mask) != mask;
+}
+
+void ArithProfile::emitSetNonNumber(CCallHelpers& jit) const
+{
+    if (shouldEmitSetNonNumber())
+        jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNumber), CCallHelpers::AbsoluteAddress(addressOfBits()));
+}
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+namespace WTF {
+    
+using namespace JSC;
+
+void printInternal(PrintStream& out, const ArithProfile& profile)
+{
+    const char* separator = "";
+
+    out.print("Result:<");
+    if (!profile.didObserveNonInt32()) {
+        out.print("Int32");
+        separator = "|";
+    } else {
+        if (profile.didObserveNegZeroDouble()) {
+            out.print(separator, "NegZeroDouble");
+            separator = "|";
+        }
+        if (profile.didObserveNonNegZeroDouble()) {
+            out.print(separator, "NonNegZeroDouble");
+            separator = "|";
+        }
+        if (profile.didObserveNonNumber()) {
+            out.print(separator, "NonNumber");
+            separator = "|";
+        }
+        if (profile.didObserveInt32Overflow()) {
+            out.print(separator, "Int32Overflow");
+            separator = "|";
+        }
+        if (profile.didObserveInt52Overflow()) {
+            out.print(separator, "Int52Overflow");
+            separator = "|";
+        }
+    }
+    if (profile.tookSpecialFastPath())
+        out.print(separator, "Took special fast path.");
+    out.print(">");
+
+    out.print(" LHS ObservedType:<");
+    out.print(profile.lhsObservedType());
+    out.print("> RHS ObservedType:<");
+    out.print(profile.rhsObservedType());
+    out.print(">");
+
+    out.print(" LHS ResultType:<", RawPointer(bitwise_cast(static_cast(profile.lhsResultType().bits()))));
+    out.print("> RHS ResultType:<", RawPointer(bitwise_cast(static_cast(profile.rhsResultType().bits()))));
+    out.print(">");
+}
+
+void printInternal(PrintStream& out, const JSC::ObservedType& observedType)
+{
+    const char* separator = "";
+    if (observedType.sawInt32()) {
+        out.print(separator, "Int32");
+        separator = "|";
+    }
+    if (observedType.sawNumber()) {
+        out.print(separator, "Number");
+        separator = "|";
+    }
+    if (observedType.sawNonNumber()) {
+        out.print(separator, "NonNumber");
+        separator = "|";
+    }
+}
+
+} // namespace WTF
diff --git a/bytecode/ArithProfile.h b/bytecode/ArithProfile.h
new file mode 100644
index 0000000..40fad1b
--- /dev/null
+++ b/bytecode/ArithProfile.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+#include "ResultType.h"
+#include "TagRegistersMode.h"
+
+namespace JSC {
+
+class CCallHelpers;
+
+struct ObservedType {
+    ObservedType(uint8_t bits = TypeEmpty)
+        : m_bits(bits)
+    { }
+
+    bool sawInt32() const { return m_bits & TypeInt32; }
+    bool isOnlyInt32() const { return m_bits == TypeInt32; }
+    bool sawNumber() const { return m_bits & TypeNumber; }
+    bool isOnlyNumber() const { return m_bits == TypeNumber; }
+    bool sawNonNumber() const { return m_bits & TypeNonNumber; }
+    bool isOnlyNonNumber() const { return m_bits == TypeNonNumber; }
+    bool isEmpty() const { return !m_bits; }
+    uint8_t bits() const { return m_bits; }
+
+    ObservedType withInt32() const { return ObservedType(m_bits | TypeInt32); }
+    ObservedType withNumber() const { return ObservedType(m_bits | TypeNumber); }
+    ObservedType withNonNumber() const { return ObservedType(m_bits | TypeNonNumber); }
+    ObservedType withoutNonNumber() const { return ObservedType(m_bits & ~TypeNonNumber); }
+
+    bool operator==(const ObservedType& other) const { return m_bits == other.m_bits; }
+
+    static const uint8_t TypeEmpty = 0x0;
+    static const uint8_t TypeInt32 = 0x1;
+    static const uint8_t TypeNumber = 0x02;
+    static const uint8_t TypeNonNumber = 0x04;
+
+    static const uint32_t numBitsNeeded = 3;
+
+private:
+    uint8_t m_bits;
+};
+
+struct ArithProfile {
+private:
+    static const uint32_t numberOfFlagBits = 5;
+    static const uint32_t rhsResultTypeShift = numberOfFlagBits;
+    static const uint32_t lhsResultTypeShift = rhsResultTypeShift + ResultType::numBitsNeeded;
+    static const uint32_t rhsObservedTypeShift = lhsResultTypeShift + ResultType::numBitsNeeded;
+    static const uint32_t lhsObservedTypeShift = rhsObservedTypeShift + ObservedType::numBitsNeeded;
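+
+    // m_bits layout, low to high: 5 observed-result flag bits, rhs ResultType, lhs ResultType,
+    // rhs ObservedType, lhs ObservedType, and finally the special-fast-path bit.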
+
+    static_assert(ObservedType::numBitsNeeded == 3, "We make a hard assumption about that here.");
+    static const uint32_t clearRhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << rhsObservedTypeShift) | (1 << (rhsObservedTypeShift + 1)) | (1 << (rhsObservedTypeShift + 2))));
+    static const uint32_t clearLhsObservedTypeBitMask = static_cast<uint32_t>(~((1 << lhsObservedTypeShift) | (1 << (lhsObservedTypeShift + 1)) | (1 << (lhsObservedTypeShift + 2))));
+
+    static const uint32_t resultTypeMask = (1 << ResultType::numBitsNeeded) - 1;
+    static const uint32_t observedTypeMask = (1 << ObservedType::numBitsNeeded) - 1;
+public:
+    static const uint32_t specialFastPathBit = 1 << (lhsObservedTypeShift + ObservedType::numBitsNeeded);
+    static_assert((lhsObservedTypeShift + ObservedType::numBitsNeeded) <= (sizeof(uint32_t) * 8) - 1, "Should fit in a uint32_t.");
+    static_assert(!(specialFastPathBit & ~clearLhsObservedTypeBitMask), "These bits should not intersect.");
+    static_assert(specialFastPathBit & clearLhsObservedTypeBitMask, "These bits should intersect.");
+    static_assert(specialFastPathBit > ~clearLhsObservedTypeBitMask, "These bits should not intersect and specialFastPathBit should be a higher bit.");
+
+    ArithProfile(ResultType arg)
+    {
+        m_bits = (arg.bits() << lhsResultTypeShift);
+        ASSERT(lhsResultType().bits() == arg.bits());
+        ASSERT(lhsObservedType().isEmpty());
+        ASSERT(rhsObservedType().isEmpty());
+    }
+
+    ArithProfile(ResultType lhs, ResultType rhs)
+    {
+        m_bits = (lhs.bits() << lhsResultTypeShift) | (rhs.bits() << rhsResultTypeShift);
+        ASSERT(lhsResultType().bits() == lhs.bits() && rhsResultType().bits() == rhs.bits());
+        ASSERT(lhsObservedType().isEmpty());
+        ASSERT(rhsObservedType().isEmpty());
+    }
+    ArithProfile() = default;
+
+    static ArithProfile fromInt(uint32_t bits)
+    {
+        ArithProfile result;
+        result.m_bits = bits;
+        return result;
+    }
+
+    enum ObservedResults {
+        NonNegZeroDouble = 1 << 0,
+        NegZeroDouble    = 1 << 1,
+        NonNumber        = 1 << 2,
+        Int32Overflow    = 1 << 3,
+        Int52Overflow    = 1 << 4,
+    };
+
+    ResultType lhsResultType() const { return ResultType((m_bits >> lhsResultTypeShift) & resultTypeMask); }
+    ResultType rhsResultType() const { return ResultType((m_bits >> rhsResultTypeShift) & resultTypeMask); }
+
+    ObservedType lhsObservedType() const { return ObservedType((m_bits >> lhsObservedTypeShift) & observedTypeMask); }
+    ObservedType rhsObservedType() const { return ObservedType((m_bits >> rhsObservedTypeShift) & observedTypeMask); }
+    void setLhsObservedType(ObservedType type)
+    {
+        uint32_t bits = m_bits;
+        bits &= clearLhsObservedTypeBitMask;
+        bits |= type.bits() << lhsObservedTypeShift;
+        m_bits = bits;
+        ASSERT(lhsObservedType() == type);
+    }
+
+    void setRhsObservedType(ObservedType type)
+    { 
+        uint32_t bits = m_bits;
+        bits &= clearRhsObservedTypeBitMask;
+        bits |= type.bits() << rhsObservedTypeShift;
+        m_bits = bits;
+        ASSERT(rhsObservedType() == type);
+    }
+
+    bool tookSpecialFastPath() const { return m_bits & specialFastPathBit; }
+
+    bool didObserveNonInt32() const { return hasBits(NonNegZeroDouble | NegZeroDouble | NonNumber); }
+    bool didObserveDouble() const { return hasBits(NonNegZeroDouble | NegZeroDouble); }
+    bool didObserveNonNegZeroDouble() const { return hasBits(NonNegZeroDouble); }
+    bool didObserveNegZeroDouble() const { return hasBits(NegZeroDouble); }
+    bool didObserveNonNumber() const { return hasBits(NonNumber); }
+    bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); }
+    bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); }
+
+    void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); }
+    void setObservedNegZeroDouble() { setBit(NegZeroDouble); }
+    void setObservedNonNumber() { setBit(NonNumber); }
+    void setObservedInt32Overflow() { setBit(Int32Overflow); }
+    void setObservedInt52Overflow() { setBit(Int52Overflow); }
+
+    const void* addressOfBits() const { return &m_bits; }
+
+    void observeResult(JSValue value)
+    {
+        if (value.isInt32())
+            return;
+        if (value.isNumber()) {
+            m_bits |= Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble;
+            return;
+        }
+        m_bits |= NonNumber;
+    }
+
+    void lhsSawInt32() { setLhsObservedType(lhsObservedType().withInt32()); }
+    void lhsSawNumber() { setLhsObservedType(lhsObservedType().withNumber()); }
+    void lhsSawNonNumber() { setLhsObservedType(lhsObservedType().withNonNumber()); }
+    void rhsSawInt32() { setRhsObservedType(rhsObservedType().withInt32()); }
+    void rhsSawNumber() { setRhsObservedType(rhsObservedType().withNumber()); }
+    void rhsSawNonNumber() { setRhsObservedType(rhsObservedType().withNonNumber()); }
+
+    void observeLHS(JSValue lhs)
+    {
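+        // Stage the update in a local copy so m_bits is only ever written back with a single
+        // store (see the comment on m_bits below).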
+        ArithProfile newProfile = *this;
+        if (lhs.isNumber()) {
+            if (lhs.isInt32())
+                newProfile.lhsSawInt32();
+            else
+                newProfile.lhsSawNumber();
+        } else
+            newProfile.lhsSawNonNumber();
+
+        m_bits = newProfile.bits();
+    }
+
+    void observeLHSAndRHS(JSValue lhs, JSValue rhs)
+    {
+        observeLHS(lhs);
+
+        ArithProfile newProfile = *this;
+        if (rhs.isNumber()) {
+            if (rhs.isInt32())
+                newProfile.rhsSawInt32();
+            else
+                newProfile.rhsSawNumber();
+        } else
+            newProfile.rhsSawNonNumber();
+
+        m_bits = newProfile.bits();
+    }
+
+#if ENABLE(JIT)    
+    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble) if it sees a
+    // double. Sets NonNumber if it sees a non-number.
+    void emitObserveResult(CCallHelpers&, JSValueRegs, TagRegistersMode = HaveTagRegisters);
+    
+    // Sets (Int32Overflow | Int52Overflow | NonNegZeroDouble | NegZeroDouble).
+    bool shouldEmitSetDouble() const;
+    void emitSetDouble(CCallHelpers&) const;
+    
+    // Sets NonNumber.
+    void emitSetNonNumber(CCallHelpers&) const;
+    bool shouldEmitSetNonNumber() const;
+#endif // ENABLE(JIT)
+
+    uint32_t bits() const { return m_bits; }
+
+private:
+    bool hasBits(int mask) const { return m_bits & mask; }
+    void setBit(int mask) { m_bits |= mask; }
+
+    uint32_t m_bits { 0 }; // We take care to update m_bits only in a single operation. We don't ever store an inconsistent bit representation to it.
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, const JSC::ArithProfile&);
+void printInternal(PrintStream&, const JSC::ObservedType&);
+
+} // namespace WTF
diff --git a/bytecode/ArrayAllocationProfile.cpp b/bytecode/ArrayAllocationProfile.cpp
new file mode 100644
index 0000000..905b5bd
--- /dev/null
+++ b/bytecode/ArrayAllocationProfile.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ArrayAllocationProfile.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void ArrayAllocationProfile::updateIndexingType()
+{
+    // This is awkwardly racy but totally sound even when executed concurrently. The
+    // worst cases go something like this:
+    //
+    // - Two threads race to execute this code; one of them succeeds in updating the
+    //   m_currentIndexingType and the other either updates it again, or sees a null
+    //   m_lastArray; if it updates it again then at worst it will cause the profile
+    //   to "forget" some array. That's still sound, since we don't promise that
+    //   this profile is a reflection of any kind of truth.
+    //
+    // - A concurrent thread reads m_lastArray, but that array is now dead. While
+    //   it's possible for that array to no longer be reachable, it cannot actually
+    //   be freed, since we require the GC to wait until all concurrent JITing
+    //   finishes.
+    
+    JSArray* lastArray = m_lastArray;
+    if (!lastArray)
+        return;
+    m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->indexingType());
+    m_lastArray = 0;
+}
+
+} // namespace JSC
+
diff --git a/bytecode/ArrayAllocationProfile.h b/bytecode/ArrayAllocationProfile.h
new file mode 100644
index 0000000..cf30de6
--- /dev/null
+++ b/bytecode/ArrayAllocationProfile.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "IndexingType.h"
+#include "JSArray.h"
+
+namespace JSC {
+
+class ArrayAllocationProfile {
+public:
+    ArrayAllocationProfile()
+        : m_currentIndexingType(ArrayWithUndecided)
+        , m_lastArray(0)
+    {
+    }
+    
+    IndexingType selectIndexingType()
+    {
+        JSArray* lastArray = m_lastArray;
+        if (lastArray && UNLIKELY(lastArray->indexingType() != m_currentIndexingType))
+            updateIndexingType();
+        return m_currentIndexingType;
+    }
+    
+    JSArray* updateLastAllocation(JSArray* lastArray)
+    {
+        m_lastArray = lastArray;
+        return lastArray;
+    }
+    
+    JS_EXPORT_PRIVATE void updateIndexingType();
+    
+    static IndexingType selectIndexingTypeFor(ArrayAllocationProfile* profile)
+    {
+        if (!profile)
+            return ArrayWithUndecided;
+        return profile->selectIndexingType();
+    }
+    
+    static JSArray* updateLastAllocationFor(ArrayAllocationProfile* profile, JSArray* lastArray)
+    {
+        if (profile)
+            profile->updateLastAllocation(lastArray);
+        return lastArray;
+    }
+
+private:
+    
+    IndexingType m_currentIndexingType;
+    JSArray* m_lastArray;
+};
+
+} // namespace JSC
diff --git a/bytecode/ArrayProfile.cpp b/bytecode/ArrayProfile.cpp
new file mode 100644
index 0000000..3146b18
--- /dev/null
+++ b/bytecode/ArrayProfile.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ArrayProfile.h"
+
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/StringExtras.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC {
+
+void dumpArrayModes(PrintStream& out, ArrayModes arrayModes)
+{
+    if (!arrayModes) {
+        out.print("");
+        return;
+    }
+    
+    if (arrayModes == ALL_ARRAY_MODES) {
+        out.print("TOP");
+        return;
+    }
+    
+    CommaPrinter comma("|");
+    if (arrayModes & asArrayModes(NonArray))
+        out.print(comma, "NonArray");
+    if (arrayModes & asArrayModes(NonArrayWithInt32))
+        out.print(comma, "NonArrayWithInt32");
+    if (arrayModes & asArrayModes(NonArrayWithDouble))
+        out.print(comma, "NonArrayWithDouble");
+    if (arrayModes & asArrayModes(NonArrayWithContiguous))
+        out.print(comma, "NonArrayWithContiguous");
+    if (arrayModes & asArrayModes(NonArrayWithArrayStorage))
+        out.print(comma, "NonArrayWithArrayStorage");
+    if (arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage))
+        out.print(comma, "NonArrayWithSlowPutArrayStorage");
+    if (arrayModes & asArrayModes(ArrayClass))
+        out.print(comma, "ArrayClass");
+    if (arrayModes & asArrayModes(ArrayWithUndecided))
+        out.print(comma, "ArrayWithUndecided");
+    if (arrayModes & asArrayModes(ArrayWithInt32))
+        out.print(comma, "ArrayWithInt32");
+    if (arrayModes & asArrayModes(ArrayWithDouble))
+        out.print(comma, "ArrayWithDouble");
+    if (arrayModes & asArrayModes(ArrayWithContiguous))
+        out.print(comma, "ArrayWithContiguous");
+    if (arrayModes & asArrayModes(ArrayWithArrayStorage))
+        out.print(comma, "ArrayWithArrayStorage");
+    if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage))
+        out.print(comma, "ArrayWithSlowPutArrayStorage");
+
+    if (arrayModes & Int8ArrayMode)
+        out.print(comma, "Int8ArrayMode");
+    if (arrayModes & Int16ArrayMode)
+        out.print(comma, "Int16ArrayMode");
+    if (arrayModes & Int32ArrayMode)
+        out.print(comma, "Int32ArrayMode");
+    if (arrayModes & Uint8ArrayMode)
+        out.print(comma, "Uint8ArrayMode");
+    if (arrayModes & Uint8ClampedArrayMode)
+        out.print(comma, "Uint8ClampedArrayMode");
+    if (arrayModes & Uint16ArrayMode)
+        out.print(comma, "Uint16ArrayMode");
+    if (arrayModes & Uint32ArrayMode)
+        out.print(comma, "Uint32ArrayMode");
+    if (arrayModes & Float32ArrayMode)
+        out.print(comma, "Float32ArrayMode");
+    if (arrayModes & Float64ArrayMode)
+        out.print(comma, "Float64ArrayMode");
+}
+
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJSLocker& locker, CodeBlock* codeBlock)
+{
+    if (!m_lastSeenStructureID)
+        return;
+    
+    Structure* lastSeenStructure = codeBlock->heap()->structureIDTable().get(m_lastSeenStructureID);
+    computeUpdatedPrediction(locker, codeBlock, lastSeenStructure);
+    m_lastSeenStructureID = 0;
+}
+
+void ArrayProfile::computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock* codeBlock, Structure* lastSeenStructure)
+{
+    m_observedArrayModes |= arrayModeFromStructure(lastSeenStructure);
+    
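+    // First-run pruning: the first time we update and already see more than one mode, reset
+    // the profile to just the most recently seen structure's mode so one-off modes observed
+    // during warm-up do not permanently pollute the prediction.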
+    if (!m_didPerformFirstRunPruning
+        && hasTwoOrMoreBitsSet(m_observedArrayModes)) {
+        m_observedArrayModes = arrayModeFromStructure(lastSeenStructure);
+        m_didPerformFirstRunPruning = true;
+    }
+    
+    m_mayInterceptIndexedAccesses |=
+        lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero();
+    JSGlobalObject* globalObject = codeBlock->globalObject();
+    if (!globalObject->isOriginalArrayStructure(lastSeenStructure)
+        && !globalObject->isOriginalTypedArrayStructure(lastSeenStructure))
+        m_usesOriginalArrayStructures = false;
+}
+
+CString ArrayProfile::briefDescription(const ConcurrentJSLocker& locker, CodeBlock* codeBlock)
+{
+    computeUpdatedPrediction(locker, codeBlock);
+    return briefDescriptionWithoutUpdating(locker);
+}
+
+CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJSLocker&)
+{
+    StringPrintStream out;
+    
+    bool hasPrinted = false;
+    
+    if (m_observedArrayModes) {
+        if (hasPrinted)
+            out.print(", ");
+        out.print(ArrayModesDump(m_observedArrayModes));
+        hasPrinted = true;
+    }
+    
+    if (m_mayStoreToHole) {
+        if (hasPrinted)
+            out.print(", ");
+        out.print("Hole");
+        hasPrinted = true;
+    }
+    
+    if (m_outOfBounds) {
+        if (hasPrinted)
+            out.print(", ");
+        out.print("OutOfBounds");
+        hasPrinted = true;
+    }
+    
+    if (m_mayInterceptIndexedAccesses) {
+        if (hasPrinted)
+            out.print(", ");
+        out.print("Intercept");
+        hasPrinted = true;
+    }
+    
+    if (m_usesOriginalArrayStructures) {
+        if (hasPrinted)
+            out.print(", ");
+        out.print("Original");
+        hasPrinted = true;
+    }
+    
+    UNUSED_PARAM(hasPrinted);
+    
+    return out.toCString();
+}
+
+} // namespace JSC
+
diff --git a/bytecode/ArrayProfile.h b/bytecode/ArrayProfile.h
new file mode 100644
index 0000000..279906d
--- /dev/null
+++ b/bytecode/ArrayProfile.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ConcurrentJSLock.h"
+#include "JSArray.h"
+#include "Structure.h"
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+class CodeBlock;
+class LLIntOffsetsExtractor;
+
+// This is a bitfield where each bit represents a type of array access that we have seen.
+// There are 16 indexing types that use the lower bits.
+// There are 9 typed array types taking bits 16 to 24.
+typedef unsigned ArrayModes;
+
+const ArrayModes Int8ArrayMode = 1 << 16;
+const ArrayModes Int16ArrayMode = 1 << 17;
+const ArrayModes Int32ArrayMode = 1 << 18;
+const ArrayModes Uint8ArrayMode = 1 << 19;
+const ArrayModes Uint8ClampedArrayMode = 1 << 20;
+const ArrayModes Uint16ArrayMode = 1 << 21;
+const ArrayModes Uint32ArrayMode = 1 << 22;
+const ArrayModes Float32ArrayMode = 1 << 23;
+const ArrayModes Float64ArrayMode = 1 << 24;
+
+#define asArrayModes(type) \
+    (static_cast<unsigned>(1) << static_cast<unsigned>(type))
+
+#define ALL_TYPED_ARRAY_MODES \
+    (Int8ArrayMode            \
+    | Int16ArrayMode          \
+    | Int32ArrayMode          \
+    | Uint8ArrayMode          \
+    | Uint8ClampedArrayMode   \
+    | Uint16ArrayMode         \
+    | Uint32ArrayMode         \
+    | Float32ArrayMode        \
+    | Float64ArrayMode        \
+    )
+
+#define ALL_NON_ARRAY_ARRAY_MODES                       \
+    (asArrayModes(NonArray)                             \
+    | asArrayModes(NonArrayWithInt32)                   \
+    | asArrayModes(NonArrayWithDouble)                  \
+    | asArrayModes(NonArrayWithContiguous)              \
+    | asArrayModes(NonArrayWithArrayStorage)            \
+    | asArrayModes(NonArrayWithSlowPutArrayStorage)     \
+    | ALL_TYPED_ARRAY_MODES)
+
+#define ALL_ARRAY_ARRAY_MODES                           \
+    (asArrayModes(ArrayClass)                           \
+    | asArrayModes(ArrayWithUndecided)                  \
+    | asArrayModes(ArrayWithInt32)                      \
+    | asArrayModes(ArrayWithDouble)                     \
+    | asArrayModes(ArrayWithContiguous)                 \
+    | asArrayModes(ArrayWithArrayStorage)               \
+    | asArrayModes(ArrayWithSlowPutArrayStorage))
+
+#define ALL_ARRAY_MODES (ALL_NON_ARRAY_ARRAY_MODES | ALL_ARRAY_ARRAY_MODES)
+
+inline ArrayModes arrayModeFromStructure(Structure* structure)
+{
+    switch (structure->classInfo()->typedArrayStorageType) {
+    case TypeInt8:
+        return Int8ArrayMode;
+    case TypeUint8:
+        return Uint8ArrayMode;
+    case TypeUint8Clamped:
+        return Uint8ClampedArrayMode;
+    case TypeInt16:
+        return Int16ArrayMode;
+    case TypeUint16:
+        return Uint16ArrayMode;
+    case TypeInt32:
+        return Int32ArrayMode;
+    case TypeUint32:
+        return Uint32ArrayMode;
+    case TypeFloat32:
+        return Float32ArrayMode;
+    case TypeFloat64:
+        return Float64ArrayMode;
+    case TypeDataView:
+    case NotTypedArray:
+        break;
+    }
+    return asArrayModes(structure->indexingType());
+}
+
+void dumpArrayModes(PrintStream&, ArrayModes);
+MAKE_PRINT_ADAPTOR(ArrayModesDump, ArrayModes, dumpArrayModes);
+
+inline bool mergeArrayModes(ArrayModes& left, ArrayModes right)
+{
+    ArrayModes newModes = left | right;
+    if (newModes == left)
+        return false;
+    left = newModes;
+    return true;
+}
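+
+// Illustrative sketch of how the two helpers above compose (assumes a Structure* named
+// structure and a hypothetical invalidateSpeculation() callback):
+//
+//     ArrayModes observed = 0;
+//     if (mergeArrayModes(observed, arrayModeFromStructure(structure)))
+//         invalidateSpeculation(); // fires only when a previously unseen mode appears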
+
+inline bool arrayModesAreClearOrTop(ArrayModes modes)
+{
+    return !modes || modes == ALL_ARRAY_MODES;
+}
+
+// Checks if proven is a subset of expected.
+inline bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected)
+{
+    return (expected | proven) == expected;
+}
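+// For example, proven = asArrayModes(ArrayWithInt32) is already checked against
+// expected = asArrayModes(ArrayWithInt32) | asArrayModes(ArrayWithDouble), but not vice versa.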
+
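+// Tests both variants of the given shape: the non-array form (NonArray | shape) and the
+// array form (ArrayClass | shape).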
+inline bool arrayModesInclude(ArrayModes arrayModes, IndexingType shape)
+{
+    return !!(arrayModes & (asArrayModes(NonArray | shape) | asArrayModes(ArrayClass | shape)));
+}
+
+inline bool shouldUseSlowPutArrayStorage(ArrayModes arrayModes)
+{
+    return arrayModesInclude(arrayModes, SlowPutArrayStorageShape);
+}
+
+inline bool shouldUseFastArrayStorage(ArrayModes arrayModes)
+{
+    return arrayModesInclude(arrayModes, ArrayStorageShape);
+}
+
+inline bool shouldUseContiguous(ArrayModes arrayModes)
+{
+    return arrayModesInclude(arrayModes, ContiguousShape);
+}
+
+inline bool shouldUseDouble(ArrayModes arrayModes)
+{
+    return arrayModesInclude(arrayModes, DoubleShape);
+}
+
+inline bool shouldUseInt32(ArrayModes arrayModes)
+{
+    return arrayModesInclude(arrayModes, Int32Shape);
+}
+
+inline bool hasSeenArray(ArrayModes arrayModes)
+{
+    return arrayModes & ALL_ARRAY_ARRAY_MODES;
+}
+
+inline bool hasSeenNonArray(ArrayModes arrayModes)
+{
+    return arrayModes & ALL_NON_ARRAY_ARRAY_MODES;
+}
+
+class ArrayProfile {
+public:
+    ArrayProfile()
+        : m_bytecodeOffset(std::numeric_limits<unsigned>::max())
+        , m_lastSeenStructureID(0)
+        , m_mayStoreToHole(false)
+        , m_outOfBounds(false)
+        , m_mayInterceptIndexedAccesses(false)
+        , m_usesOriginalArrayStructures(true)
+        , m_didPerformFirstRunPruning(false)
+        , m_observedArrayModes(0)
+    {
+    }
+    
+    ArrayProfile(unsigned bytecodeOffset)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_lastSeenStructureID(0)
+        , m_mayStoreToHole(false)
+        , m_outOfBounds(false)
+        , m_mayInterceptIndexedAccesses(false)
+        , m_usesOriginalArrayStructures(true)
+        , m_didPerformFirstRunPruning(false)
+        , m_observedArrayModes(0)
+    {
+    }
+    
+    unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+    
+    StructureID* addressOfLastSeenStructureID() { return &m_lastSeenStructureID; }
+    ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; }
+    bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; }
+
+    void setOutOfBounds() { m_outOfBounds = true; }
+    bool* addressOfOutOfBounds() { return &m_outOfBounds; }
+    
+    void observeStructure(Structure* structure)
+    {
+        m_lastSeenStructureID = structure->id();
+    }
+    
+    void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*);
+    void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*, Structure* lastSeenStructure);
+    
+    ArrayModes observedArrayModes(const ConcurrentJSLocker&) const { return m_observedArrayModes; }
+    bool mayInterceptIndexedAccesses(const ConcurrentJSLocker&) const { return m_mayInterceptIndexedAccesses; }
+    
+    bool mayStoreToHole(const ConcurrentJSLocker&) const { return m_mayStoreToHole; }
+    bool outOfBounds(const ConcurrentJSLocker&) const { return m_outOfBounds; }
+    
+    bool usesOriginalArrayStructures(const ConcurrentJSLocker&) const { return m_usesOriginalArrayStructures; }
+    
+    CString briefDescription(const ConcurrentJSLocker&, CodeBlock*);
+    CString briefDescriptionWithoutUpdating(const ConcurrentJSLocker&);
+    
+private:
+    friend class LLIntOffsetsExtractor;
+    
+    static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); }
+    
+    unsigned m_bytecodeOffset;
+    StructureID m_lastSeenStructureID;
+    bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies.
+    bool m_outOfBounds;
+    bool m_mayInterceptIndexedAccesses : 1;
+    bool m_usesOriginalArrayStructures : 1;
+    bool m_didPerformFirstRunPruning : 1;
+    ArrayModes m_observedArrayModes;
+};
+
+typedef SegmentedVector<ArrayProfile, 4> ArrayProfileVector;
+
+} // namespace JSC
diff --git a/bytecode/ByValInfo.h b/bytecode/ByValInfo.h
new file mode 100644
index 0000000..e5fa708
--- /dev/null
+++ b/bytecode/ByValInfo.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ClassInfo.h"
+#include "CodeLocation.h"
+#include "CodeOrigin.h"
+#include "IndexingType.h"
+#include "JITStubRoutine.h"
+#include "Structure.h"
+
+namespace JSC {
+
+class Symbol;
+
+#if ENABLE(JIT)
+
+class StructureStubInfo;
+
+enum JITArrayMode {
+    JITInt32,
+    JITDouble,
+    JITContiguous,
+    JITArrayStorage,
+    JITDirectArguments,
+    JITScopedArguments,
+    JITInt8Array,
+    JITInt16Array,
+    JITInt32Array,
+    JITUint8Array,
+    JITUint8ClampedArray,
+    JITUint16Array,
+    JITUint32Array,
+    JITFloat32Array,
+    JITFloat64Array
+};
+
+inline bool isOptimizableIndexingType(IndexingType indexingType)
+{
+    switch (indexingType) {
+    case ALL_INT32_INDEXING_TYPES:
+    case ALL_DOUBLE_INDEXING_TYPES:
+    case ALL_CONTIGUOUS_INDEXING_TYPES:
+    case ARRAY_WITH_ARRAY_STORAGE_INDEXING_TYPES:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool hasOptimizableIndexingForJSType(JSType type)
+{
+    switch (type) {
+    case DirectArgumentsType:
+    case ScopedArgumentsType:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo)
+{
+    return isTypedView(classInfo->typedArrayStorageType);
+}
+
+inline bool hasOptimizableIndexing(Structure* structure)
+{
+    return isOptimizableIndexingType(structure->indexingType())
+        || hasOptimizableIndexingForJSType(structure->typeInfo().type())
+        || hasOptimizableIndexingForClassInfo(structure->classInfo());
+}
+
+inline JITArrayMode jitArrayModeForIndexingType(IndexingType indexingType)
+{
+    switch (indexingType) {
+    case ALL_INT32_INDEXING_TYPES:
+        return JITInt32;
+    case ALL_DOUBLE_INDEXING_TYPES:
+        return JITDouble;
+    case ALL_CONTIGUOUS_INDEXING_TYPES:
+        return JITContiguous;
+    case ARRAY_WITH_ARRAY_STORAGE_INDEXING_TYPES:
+        return JITArrayStorage;
+    default:
+        CRASH();
+        return JITContiguous;
+    }
+}
+
+inline JITArrayMode jitArrayModeForJSType(JSType type)
+{
+    switch (type) {
+    case DirectArgumentsType:
+        return JITDirectArguments;
+    case ScopedArgumentsType:
+        return JITScopedArguments;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return JITContiguous;
+    }
+}
+
+inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo)
+{
+    switch (classInfo->typedArrayStorageType) {
+    case TypeInt8:
+        return JITInt8Array;
+    case TypeInt16:
+        return JITInt16Array;
+    case TypeInt32:
+        return JITInt32Array;
+    case TypeUint8:
+        return JITUint8Array;
+    case TypeUint8Clamped:
+        return JITUint8ClampedArray;
+    case TypeUint16:
+        return JITUint16Array;
+    case TypeUint32:
+        return JITUint32Array;
+    case TypeFloat32:
+        return JITFloat32Array;
+    case TypeFloat64:
+        return JITFloat64Array;
+    default:
+        CRASH();
+        return JITContiguous;
+    }
+}
+
+inline bool jitArrayModePermitsPut(JITArrayMode mode)
+{
+    switch (mode) {
+    case JITDirectArguments:
+    case JITScopedArguments:
+        // We could support put_by_val on these at some point, but it's just not that profitable
+        // at the moment.
+        return false;
+    default:
+        return true;
+    }
+}
+
+inline TypedArrayType typedArrayTypeForJITArrayMode(JITArrayMode mode)
+{
+    switch (mode) {
+    case JITInt8Array:
+        return TypeInt8;
+    case JITInt16Array:
+        return TypeInt16;
+    case JITInt32Array:
+        return TypeInt32;
+    case JITUint8Array:
+        return TypeUint8;
+    case JITUint8ClampedArray:
+        return TypeUint8Clamped;
+    case JITUint16Array:
+        return TypeUint16;
+    case JITUint32Array:
+        return TypeUint32;
+    case JITFloat32Array:
+        return TypeFloat32;
+    case JITFloat64Array:
+        return TypeFloat64;
+    default:
+        CRASH();
+        return NotTypedArray;
+    }
+}
+
+inline JITArrayMode jitArrayModeForStructure(Structure* structure)
+{
+    if (isOptimizableIndexingType(structure->indexingType()))
+        return jitArrayModeForIndexingType(structure->indexingType());
+    
+    if (hasOptimizableIndexingForJSType(structure->typeInfo().type()))
+        return jitArrayModeForJSType(structure->typeInfo().type());
+    
+    ASSERT(hasOptimizableIndexingForClassInfo(structure->classInfo()));
+    return jitArrayModeForClassInfo(structure->classInfo());
+}
+
+struct ByValInfo {
+    ByValInfo() { }
+
+    ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, CodeLocationLabel exceptionHandler, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath)
+        : bytecodeIndex(bytecodeIndex)
+        , notIndexJump(notIndexJump)
+        , badTypeJump(badTypeJump)
+        , exceptionHandler(exceptionHandler)
+        , arrayMode(arrayMode)
+        , arrayProfile(arrayProfile)
+        , badTypeJumpToDone(badTypeJumpToDone)
+        , badTypeJumpToNextHotPath(badTypeJumpToNextHotPath)
+        , returnAddressToSlowPath(returnAddressToSlowPath)
+        , slowPathCount(0)
+        , stubInfo(nullptr)
+        , tookSlowPath(false)
+        , seen(false)
+    {
+    }
+
+    unsigned bytecodeIndex;
+    CodeLocationJump notIndexJump;
+    CodeLocationJump badTypeJump;
+    CodeLocationLabel exceptionHandler;
+    JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code.
+    ArrayProfile* arrayProfile;
+    int16_t badTypeJumpToDone;
+    int16_t badTypeJumpToNextHotPath;
+    int16_t returnAddressToSlowPath;
+    unsigned slowPathCount;
+    RefPtr<JITStubRoutine> stubRoutine;
+    Identifier cachedId;
+    WriteBarrier<Symbol> cachedSymbol;
+    StructureStubInfo* stubInfo;
+    bool tookSlowPath : 1;
+    bool seen : 1;
+};
+
+inline unsigned getByValInfoBytecodeIndex(ByValInfo* info)
+{
+    return info->bytecodeIndex;
+}
+
+typedef HashMap<CodeOrigin, ByValInfo*, CodeOriginApproximateHash> ByValInfoMap;
+
+#else // ENABLE(JIT)
+
+typedef HashMap<int, void*> ByValInfoMap;
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
diff --git a/bytecode/BytecodeBasicBlock.cpp b/bytecode/BytecodeBasicBlock.cpp
new file mode 100644
index 0000000..47c481d
--- /dev/null
+++ b/bytecode/BytecodeBasicBlock.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeBasicBlock.h"
+
+#include "CodeBlock.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "PreciseJumpTargets.h"
+
+namespace JSC {
+
+void BytecodeBasicBlock::shrinkToFit()
+{
+    m_offsets.shrinkToFit();
+    m_successors.shrinkToFit();
+}
+
+static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset)
+{
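+    // op_catch is always a leader: it is entered from the exception-handling machinery rather
+    // than via a jump recorded in jumpTargets.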
+    if (opcodeID == op_catch)
+        return true;
+
+    return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset);
+}
+
+template<typename Block, typename Instruction>
+void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+    Vector<unsigned, 32> jumpTargets;
+    computePreciseJumpTargets(codeBlock, instructionsBegin, instructionCount, jumpTargets);
+
+    auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) {
+        block->m_index = basicBlocks.size();
+        basicBlocks.append(WTFMove(block));
+    };
+
+    auto linkBlocks = [&] (BytecodeBasicBlock* from, BytecodeBasicBlock* to) {
+        from->addSuccessor(to);
+    };
+
+    // Create the entry and exit basic blocks.
+    basicBlocks.reserveCapacity(jumpTargets.size() + 2);
+
+    auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock);
+    auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0);
+    linkBlocks(entry.get(), firstBlock.get());
+
+    appendBlock(WTFMove(entry));
+    BytecodeBasicBlock* current = firstBlock.get();
+    appendBlock(WTFMove(firstBlock));
+
+    auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock);
+
+    bool nextInstructionIsLeader = false;
+
+    Interpreter* interpreter = codeBlock->vm()->interpreter;
+    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
+        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+        unsigned opcodeLength = opcodeLengths[opcodeID];
+
+        bool createdBlock = false;
+        // If the current bytecode is a jump target, then it's the leader of its own basic block.
+        if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) {
+            auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength);
+            current = newBlock.get();
+            appendBlock(WTFMove(newBlock));
+            createdBlock = true;
+            nextInstructionIsLeader = false;
+            bytecodeOffset += opcodeLength;
+        }
+
+        // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block.
+        if (isBranch(opcodeID) || isTerminal(opcodeID) || isThrow(opcodeID))
+            nextInstructionIsLeader = true;
+
+        if (createdBlock)
+            continue;
+
+        // Otherwise, just add to the length of the current block.
+        current->addLength(opcodeLength);
+        bytecodeOffset += opcodeLength;
+    }
+
+    // Link basic blocks together.
+    for (unsigned i = 0; i < basicBlocks.size(); i++) {
+        BytecodeBasicBlock* block = basicBlocks[i].get();
+
+        if (block->isEntryBlock() || block->isExitBlock())
+            continue;
+
+        bool fallsThrough = true; 
+        for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
+            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+            unsigned opcodeLength = opcodeLengths[opcodeID];
+            // If we found a terminal bytecode, link to the exit block.
+            if (isTerminal(opcodeID)) {
+                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+                linkBlocks(block, exit.get());
+                fallsThrough = false;
+                break;
+            }
+
+            // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. 
+            // If there isn't one, treat this throw as a terminal. This is true even if we have a finally
+            // block because the finally block will create its own catch, which will generate a HandlerInfo.
+            if (isThrow(opcodeID)) {
+                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+                auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);
+                fallsThrough = false;
+                if (!handler) {
+                    linkBlocks(block, exit.get());
+                    break;
+                }
+                for (unsigned i = 0; i < basicBlocks.size(); i++) {
+                    BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
+                    if (handler->target == otherBlock->leaderOffset()) {
+                        linkBlocks(block, otherBlock);
+                        break;
+                    }
+                }
+                break;
+            }
+
+            // If we found a branch, link to the block(s) that we jump to.
+            if (isBranch(opcodeID)) {
+                ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength());
+                Vector<unsigned, 1> bytecodeOffsetsJumpedTo;
+                findJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo);
+
+                size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size();
+                ASSERT(numberOfJumpTargets);
+                for (unsigned i = 0; i < basicBlocks.size(); i++) {
+                    BytecodeBasicBlock* otherBlock = basicBlocks[i].get();
+                    if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderOffset())) {
+                        linkBlocks(block, otherBlock);
+                        --numberOfJumpTargets;
+                        if (!numberOfJumpTargets)
+                            break;
+                    }
+                }
+                // numberOfJumpTargets may not be 0 here if there are multiple jumps targeting the same
+                // basic blocks (e.g. in a switch type opcode). Since we only decrement numberOfJumpTargets
+                // once per basic block, the duplicates are not accounted for. For our purpose here,
+                // that doesn't matter because we only need to link to the target block once regardless
+                // of how many ways this block can jump there.
+
+                if (isUnconditionalBranch(opcodeID))
+                    fallsThrough = false;
+
+                break;
+            }
+            bytecodeOffset += opcodeLength;
+        }
+
+        // If we fall through then link to the next block in program order.
+        if (fallsThrough) {
+            ASSERT(i + 1 < basicBlocks.size());
+            BytecodeBasicBlock* nextBlock = basicBlocks[i + 1].get();
+            linkBlocks(block, nextBlock);
+        }
+    }
+
+    appendBlock(WTFMove(exit));
+    
+    for (auto& basicBlock : basicBlocks)
+        basicBlock->shrinkToFit();
+}
+
+void BytecodeBasicBlock::compute(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+    computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);
+}
+
+void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)
+{
+    BytecodeBasicBlock::computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeBasicBlock.h b/bytecode/BytecodeBasicBlock.h
new file mode 100644
index 0000000..fb81650
--- /dev/null
+++ b/bytecode/BytecodeBasicBlock.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <limits.h>
+#include <wtf/FastBitVector.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+class UnlinkedCodeBlock;
+struct Instruction;
+struct UnlinkedInstruction;
+
+class BytecodeBasicBlock {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    enum SpecialBlockType { EntryBlock, ExitBlock };
+    BytecodeBasicBlock(unsigned start, unsigned length);
+    BytecodeBasicBlock(SpecialBlockType);
+    void shrinkToFit();
+
+    bool isEntryBlock() { return !m_leaderOffset && !m_totalLength; }
+    bool isExitBlock() { return m_leaderOffset == UINT_MAX && m_totalLength == UINT_MAX; }
+
+    unsigned leaderOffset() { return m_leaderOffset; }
+    unsigned totalLength() { return m_totalLength; }
+
+    const Vector<unsigned>& offsets() const { return m_offsets; }
+
+    const Vector<BytecodeBasicBlock*>& successors() const { return m_successors; }
+
+    FastBitVector& in() { return m_in; }
+    FastBitVector& out() { return m_out; }
+
+    unsigned index() const { return m_index; }
+
+    static void compute(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
+    static void compute(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);
+
+private:
+    template<typename Block, typename Instruction> static void computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks);
+
+    void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); }
+
+    void addLength(unsigned);
+
+    unsigned m_leaderOffset;
+    unsigned m_totalLength;
+    unsigned m_index;
+
+    Vector<unsigned> m_offsets;
+    Vector<BytecodeBasicBlock*> m_successors;
+
+    FastBitVector m_in;
+    FastBitVector m_out;
+};
+
+inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length)
+    : m_leaderOffset(start)
+    , m_totalLength(length)
+{
+    m_offsets.append(m_leaderOffset);
+}
+
+inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockType blockType)
+    : m_leaderOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
+    , m_totalLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX)
+{
+}
+
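+// Record the bytecode offset of the instruction being appended (the current end of the block),
+// then grow the block's total length by that instruction's length.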
+inline void BytecodeBasicBlock::addLength(unsigned bytecodeLength)
+{
+    m_offsets.append(m_leaderOffset + m_totalLength);
+    m_totalLength += bytecodeLength;
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeConventions.h b/bytecode/BytecodeConventions.h
new file mode 100644
index 0000000..7781378
--- /dev/null
+++ b/bytecode/BytecodeConventions.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+// Register numbers used in bytecode operations have different meanings according to their ranges:
+//      0x80000000-0xFFFFFFFF  Negative indices from the CallFrame pointer are entries in the call frame.
+//      0x00000000-0x3FFFFFFF  Forwards indices from the CallFrame pointer are local vars and temporaries within the function's callframe.
+//      0x40000000-0x7FFFFFFF  Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
+static const int FirstConstantRegisterIndex = 0x40000000;
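+
+// For illustration only (a sketch, not taken from this header): given the ranges above, an
+// operand index can be classified as follows. The helper names here are hypothetical.
+//
+//     inline bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
+//     inline bool isCallFrameEntry(int index) { return index < 0; }
+//     // Anything else (0 <= index < FirstConstantRegisterIndex) is a local variable or temporary.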
diff --git a/bytecode/BytecodeGeneratorification.cpp b/bytecode/BytecodeGeneratorification.cpp
new file mode 100644
index 0000000..f7e1e9a
--- /dev/null
+++ b/bytecode/BytecodeGeneratorification.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeGeneratorification.h"
+
+#include "BytecodeLivenessAnalysisInlines.h"
+#include "BytecodeRewriter.h"
+#include "BytecodeUseDef.h"
+#include "IdentifierInlines.h"
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "JSCJSValueInlines.h"
+#include "JSGeneratorFunction.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include 
+
+namespace JSC {
+
+struct YieldData {
+    size_t point { 0 };
+    int argument { 0 };
+    FastBitVector liveness;
+};
+
+class BytecodeGeneratorification {
+public:
+    typedef Vector<YieldData> Yields;
+
+    BytecodeGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
+        : m_graph(codeBlock, instructions)
+        , m_generatorFrameSymbolTable(*codeBlock->vm(), generatorFrameSymbolTable)
+        , m_generatorFrameSymbolTableIndex(generatorFrameSymbolTableIndex)
+    {
+        for (BytecodeBasicBlock* block : m_graph) {
+            for (unsigned bytecodeOffset : block->offsets()) {
+                const UnlinkedInstruction* pc = &m_graph.instructions()[bytecodeOffset];
+                switch (pc->u.opcode) {
+                case op_enter: {
+                    m_enterPoint = bytecodeOffset;
+                    break;
+                }
+
+                case op_yield: {
+                    unsigned liveCalleeLocalsIndex = pc[2].u.index;
+                    if (liveCalleeLocalsIndex >= m_yields.size())
+                        m_yields.resize(liveCalleeLocalsIndex + 1);
+                    YieldData& data = m_yields[liveCalleeLocalsIndex];
+                    data.point = bytecodeOffset;
+                    data.argument = pc[3].u.operand;
+                    break;
+                }
+
+                default:
+                    break;
+                }
+            }
+        }
+    }
+
+    struct Storage {
+        Identifier identifier;
+        unsigned identifierIndex;
+        ScopeOffset scopeOffset;
+    };
+
+    void run();
+
+    BytecodeGraph& graph() { return m_graph; }
+
+    const Yields& yields() const
+    {
+        return m_yields;
+    }
+
+    Yields& yields()
+    {
+        return m_yields;
+    }
+
+    unsigned enterPoint() const
+    {
+        return m_enterPoint;
+    }
+
+private:
+    Storage storageForGeneratorLocal(unsigned index)
+    {
+        // We assign a symbol to a register; there is a one-to-one correspondence between a register and a symbol.
+        // By doing so, we allocate a dedicated storage slot in which to save the given register.
+        // This allows us to avoid saving all the live registers when they have not been overwritten since the previous resume:
+        // a register can still be retrieved even if the immediately preceding op_save did not save it.
+
+        if (m_storages.size() <= index)
+            m_storages.resize(index + 1);
+        if (std::optional<Storage> storage = m_storages[index])
+            return *storage;
+
+        UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+        Identifier identifier = Identifier::fromUid(PrivateName());
+        unsigned identifierIndex = codeBlock->numberOfIdentifiers();
+        codeBlock->addIdentifier(identifier);
+        ScopeOffset scopeOffset = m_generatorFrameSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+        m_generatorFrameSymbolTable->set(NoLockingNecessary, identifier.impl(), SymbolTableEntry(VarOffset(scopeOffset)));
+
+        Storage storage = {
+            identifier,
+            identifierIndex,
+            scopeOffset
+        };
+        m_storages[index] = storage;
+        return storage;
+    }
+
+    unsigned m_enterPoint { 0 };
+    BytecodeGraph m_graph;
+    Vector<std::optional<Storage>> m_storages;
+    Yields m_yields;
+    Strong<SymbolTable> m_generatorFrameSymbolTable;
+    int m_generatorFrameSymbolTableIndex;
+};
+
+class GeneratorLivenessAnalysis : public BytecodeLivenessPropagation<GeneratorLivenessAnalysis> {
+public:
+    GeneratorLivenessAnalysis(BytecodeGeneratorification& generatorification)
+        : m_generatorification(generatorification)
+    {
+    }
+
+    template<typename Functor>
+    void computeDefsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, OpcodeID opcodeID, UnlinkedInstruction* instruction, FastBitVector&, const Functor& functor)
+    {
+        JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+    }
+
+    template<typename Functor>
+    void computeUsesForBytecodeOffset(UnlinkedCodeBlock* codeBlock, OpcodeID opcodeID, UnlinkedInstruction* instruction, FastBitVector&, const Functor& functor)
+    {
+        JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+    }
+
+    void run()
+    {
+        // Perform a modified liveness analysis to determine which locals are live at the merge points.
+        // This produces a conservative answer to the question "which variables should be saved and resumed?".
+
+        runLivenessFixpoint(m_generatorification.graph());
+
+        for (YieldData& data : m_generatorification.yields())
+            data.liveness = getLivenessInfoAtBytecodeOffset(m_generatorification.graph(), data.point + opcodeLength(op_yield));
+    }
+
+private:
+    BytecodeGeneratorification& m_generatorification;
+};
+
+void BytecodeGeneratorification::run()
+{
+    // We calculate the liveness at each merge point. This tells us, conservatively, which registers should be saved and resumed.
+
+    {
+        GeneratorLivenessAnalysis pass(*this);
+        pass.run();
+    }
+
+    UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+    BytecodeRewriter rewriter(m_graph);
+
+    // Setup the global switch for the generator.
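+    // State 0 dispatches to the instruction just after op_enter; state i + 1 resumes at the i-th
+    // yield point recorded above.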
+    {
+        unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter);
+        unsigned switchTableIndex = m_graph.codeBlock()->numberOfSwitchJumpTables();
+        VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State));
+        auto& jumpTable = m_graph.codeBlock()->addSwitchJumpTable();
+        jumpTable.min = 0;
+        jumpTable.branchOffsets.resize(m_yields.size() + 1);
+        jumpTable.branchOffsets.fill(0);
+        jumpTable.add(0, nextToEnterPoint);
+        for (unsigned i = 0; i < m_yields.size(); ++i)
+            jumpTable.add(i + 1, m_yields[i].point);
+
+        rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) {
+            fragment.appendInstruction(op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset());
+        });
+    }
+
+    for (const YieldData& data : m_yields) {
+        VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame));
+
+        // Emit save sequence.
+        rewriter.insertFragmentBefore(data.point, [&](BytecodeRewriter::Fragment& fragment) {
+            data.liveness.forEachSetBit([&](size_t index) {
+                VirtualRegister operand = virtualRegisterForLocal(index);
+                Storage storage = storageForGeneratorLocal(index);
+
+                fragment.appendInstruction(
+                    op_put_to_scope,
+                    scope.offset(), // scope
+                    storage.identifierIndex, // identifier
+                    operand.offset(), // value
+                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
+                    m_generatorFrameSymbolTableIndex, // symbol table constant index
+                    storage.scopeOffset.offset() // scope offset
+                );
+            });
+
+            // Insert op_ret just after save sequence.
+            fragment.appendInstruction(op_ret, data.argument);
+        });
+
+        // Emit resume sequence.
+        rewriter.insertFragmentAfter(data.point, [&](BytecodeRewriter::Fragment& fragment) {
+            data.liveness.forEachSetBit([&](size_t index) {
+                VirtualRegister operand = virtualRegisterForLocal(index);
+                Storage storage = storageForGeneratorLocal(index);
+
+                UnlinkedValueProfile profile = codeBlock->addValueProfile();
+                fragment.appendInstruction(
+                    op_get_from_scope,
+                    operand.offset(), // dst
+                    scope.offset(), // scope
+                    storage.identifierIndex, // identifier
+                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
+                    0, // local scope depth
+                    storage.scopeOffset.offset(), // scope offset
+                    profile // profile
+                );
+            });
+        });
+
+        // Clip the unnecessary bytecodes.
+        rewriter.removeBytecode(data.point);
+    }
+
+    rewriter.execute();
+}
+
+void performGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)
+{
+    BytecodeGeneratorification pass(codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex);
+    pass.run();
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeGeneratorification.h b/bytecode/BytecodeGeneratorification.h
new file mode 100644
index 0000000..c7b6137
--- /dev/null
+++ b/bytecode/BytecodeGeneratorification.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class SymbolTable;
+
+void performGeneratorification(UnlinkedCodeBlock*, UnlinkedCodeBlock::UnpackedInstructions&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex);
+
+} // namespace JSC
diff --git a/bytecode/BytecodeGraph.h b/bytecode/BytecodeGraph.h
new file mode 100644
index 0000000..38a13c6
--- /dev/null
+++ b/bytecode/BytecodeGraph.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeBasicBlock.h"
+#include <wtf/IndexedContainerIterator.h>
+#include <wtf/IteratorRange.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class BytecodeBasicBlock;
+
+template<typename Block>
+class BytecodeGraph {
+    WTF_MAKE_FAST_ALLOCATED;
+    WTF_MAKE_NONCOPYABLE(BytecodeGraph);
+public:
+    typedef Block CodeBlock;
+    typedef typename Block::Instruction Instruction;
+    typedef Vector<std::unique_ptr<BytecodeBasicBlock>> BasicBlocksVector;
+
+    typedef WTF::IndexedContainerIterator<BytecodeGraph<Block>> iterator;
+
+    inline BytecodeGraph(Block*, typename Block::UnpackedInstructions&);
+
+    Block* codeBlock() const { return m_codeBlock; }
+
+    typename Block::UnpackedInstructions& instructions() { return m_instructions; }
+
+    WTF::IteratorRange<BasicBlocksVector::reverse_iterator> basicBlocksInReverseOrder()
+    {
+        return WTF::makeIteratorRange(m_basicBlocks.rbegin(), m_basicBlocks.rend());
+    }
+
+    static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset)
+    {
+        unsigned leaderOffset = block->leaderOffset();
+        return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalLength();
+    }
+
+    BytecodeBasicBlock* findBasicBlockForBytecodeOffset(unsigned bytecodeOffset)
+    {
+        /*
+            for (unsigned i = 0; i < m_basicBlocks.size(); i++) {
+                if (blockContainsBytecodeOffset(m_basicBlocks[i].get(), bytecodeOffset))
+                    return m_basicBlocks[i].get();
+            }
+            return 0;
+        */
+
+        std::unique_ptr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), bytecodeOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); });
+        // We found the block we were looking for.
+        if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
+            return (*basicBlock).get();
+
+        // Basic block is to the left of the returned block.
+        if (bytecodeOffset < (*basicBlock)->leaderOffset()) {
+            ASSERT(basicBlock - 1 >= m_basicBlocks.data());
+            ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset));
+            return basicBlock[-1].get();
+        }
+
+        // Basic block is to the right of the returned block.
+        ASSERT(&basicBlock[1] <= &m_basicBlocks.last());
+        ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset));
+        return basicBlock[1].get();
+    }
+
+    BytecodeBasicBlock* findBasicBlockWithLeaderOffset(unsigned leaderOffset)
+    {
+        return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), leaderOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); })).get();
+    }
+
+    unsigned size() const { return m_basicBlocks.size(); }
+    BytecodeBasicBlock* at(unsigned index) const { return m_basicBlocks[index].get(); }
+    BytecodeBasicBlock* operator[](unsigned index) const { return at(index); }
+
+    iterator begin() const { return iterator(*this, 0); }
+    iterator end() const { return iterator(*this, size()); }
+    BytecodeBasicBlock* first() { return at(0); }
+    BytecodeBasicBlock* last() { return at(size() - 1); }
+
+private:
+    Block* m_codeBlock;
+    BasicBlocksVector m_basicBlocks;
+    typename Block::UnpackedInstructions& m_instructions;
+};
+
+
+template<typename Block>
+BytecodeGraph::BytecodeGraph(Block* codeBlock, typename Block::UnpackedInstructions& instructions)
+    : m_codeBlock(codeBlock)
+    , m_instructions(instructions)
+{
+    ASSERT(m_codeBlock);
+    BytecodeBasicBlock::compute(m_codeBlock, instructions.begin(), instructions.size(), m_basicBlocks);
+    ASSERT(m_basicBlocks.size());
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeIntrinsicRegistry.cpp b/bytecode/BytecodeIntrinsicRegistry.cpp
new file mode 100644
index 0000000..00c9c01
--- /dev/null
+++ b/bytecode/BytecodeIntrinsicRegistry.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeIntrinsicRegistry.h"
+
+#include "ArrayIteratorPrototype.h"
+#include "BuiltinNames.h"
+#include "BytecodeGenerator.h"
+#include "JSCJSValueInlines.h"
+#include "JSGeneratorFunction.h"
+#include "JSModuleLoader.h"
+#include "JSPromise.h"
+#include "Nodes.h"
+#include "StrongInlines.h"
+
+namespace JSC {
+
+#define INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(vm.propertyNames->builtinNames().name##PrivateName().impl(), &BytecodeIntrinsicNode::emit_intrinsic_##name);
+
+BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry(VM& vm)
+    : m_vm(vm)
+    , m_bytecodeIntrinsicMap()
+{
+    JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(INITIALIZE_BYTECODE_INTRINSIC_NAMES_TO_SET)
+
+    m_undefined.set(m_vm, jsUndefined());
+    m_Infinity.set(m_vm, jsDoubleNumber(std::numeric_limits<double>::infinity()));
+    m_iterationKindKey.set(m_vm, jsNumber(IterateKey));
+    m_iterationKindValue.set(m_vm, jsNumber(IterateValue));
+    m_iterationKindKeyValue.set(m_vm, jsNumber(IterateKeyValue));
+    m_MAX_ARRAY_INDEX.set(m_vm, jsNumber(MAX_ARRAY_INDEX));
+    m_MAX_STRING_LENGTH.set(m_vm, jsNumber(JSString::MaxLength));
+    m_MAX_SAFE_INTEGER.set(m_vm, jsDoubleNumber(maxSafeInteger()));
+    m_ModuleFetch.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Fetch)));
+    m_ModuleInstantiate.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Instantiate)));
+    m_ModuleSatisfy.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Satisfy)));
+    m_ModuleLink.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Link)));
+    m_ModuleReady.set(m_vm, jsNumber(static_cast<unsigned>(JSModuleLoader::Status::Ready)));
+    m_promiseStatePending.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)));
+    m_promiseStateFulfilled.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Fulfilled)));
+    m_promiseStateRejected.set(m_vm, jsNumber(static_cast<unsigned>(JSPromise::Status::Rejected)));
+    m_GeneratorResumeModeNormal.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)));
+    m_GeneratorResumeModeThrow.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)));
+    m_GeneratorResumeModeReturn.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)));
+    m_GeneratorStateCompleted.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorState::Completed)));
+    m_GeneratorStateExecuting.set(m_vm, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorState::Executing)));
+}
+
+BytecodeIntrinsicNode::EmitterType BytecodeIntrinsicRegistry::lookup(const Identifier& ident) const
+{
+    if (!m_vm.propertyNames->isPrivateName(ident))
+        return nullptr;
+    auto iterator = m_bytecodeIntrinsicMap.find(ident.impl());
+    if (iterator == m_bytecodeIntrinsicMap.end())
+        return nullptr;
+    return iterator->value;
+}
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) \
+    JSValue BytecodeIntrinsicRegistry::name##Value(BytecodeGenerator&) \
+    { \
+        return m_##name.get(); \
+    }
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+} // namespace JSC
+
diff --git a/bytecode/BytecodeIntrinsicRegistry.h b/bytecode/BytecodeIntrinsicRegistry.h
new file mode 100644
index 0000000..0259bc6
--- /dev/null
+++ b/bytecode/BytecodeIntrinsicRegistry.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 Yusuke Suzuki .
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Identifier.h"
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class CommonIdentifiers;
+class BytecodeGenerator;
+class BytecodeIntrinsicNode;
+class RegisterID;
+class Identifier;
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_FUNCTIONS_EACH_NAME(macro) \
+    macro(argument) \
+    macro(argumentCount) \
+    macro(assert) \
+    macro(isObject) \
+    macro(isJSArray) \
+    macro(isProxyObject) \
+    macro(isDerivedArray) \
+    macro(isRegExpObject) \
+    macro(isMap) \
+    macro(isSet) \
+    macro(tailCallForwardArguments) \
+    macro(throwTypeError) \
+    macro(throwRangeError) \
+    macro(throwOutOfMemoryError) \
+    macro(tryGetById) \
+    macro(putByValDirect) \
+    macro(toNumber) \
+    macro(toString) \
+    macro(newArrayWithSize) \
+
+#define JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(macro) \
+    macro(undefined) \
+    macro(Infinity) \
+    macro(iterationKindKey) \
+    macro(iterationKindValue) \
+    macro(iterationKindKeyValue) \
+    macro(MAX_ARRAY_INDEX) \
+    macro(MAX_STRING_LENGTH) \
+    macro(MAX_SAFE_INTEGER) \
+    macro(ModuleFetch) \
+    macro(ModuleTranslate) \
+    macro(ModuleInstantiate) \
+    macro(ModuleSatisfy) \
+    macro(ModuleLink) \
+    macro(ModuleReady) \
+    macro(promiseStatePending) \
+    macro(promiseStateFulfilled) \
+    macro(promiseStateRejected) \
+    macro(GeneratorResumeModeNormal) \
+    macro(GeneratorResumeModeThrow) \
+    macro(GeneratorResumeModeReturn) \
+    macro(GeneratorStateCompleted) \
+    macro(GeneratorStateExecuting) \
+
+
+class BytecodeIntrinsicRegistry {
+    WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry);
+public:
+    explicit BytecodeIntrinsicRegistry(VM&);
+
+    typedef RegisterID* (BytecodeIntrinsicNode::* EmitterType)(BytecodeGenerator&, RegisterID*);
+
+    EmitterType lookup(const Identifier&) const;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) JSValue name##Value(BytecodeGenerator&);
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+
+private:
+    VM& m_vm;
+    HashMap<RefPtr<UniquedStringImpl>, EmitterType, IdentifierRepHash> m_bytecodeIntrinsicMap;
+
+#define JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS(name) Strong<Unknown> m_##name;
+    JSC_COMMON_BYTECODE_INTRINSIC_CONSTANTS_EACH_NAME(JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS)
+#undef JSC_DECLARE_BYTECODE_INTRINSIC_CONSTANT_GENERATORS
+};
+
+} // namespace JSC
diff --git a/bytecode/BytecodeKills.h b/bytecode/BytecodeKills.h
new file mode 100644
index 0000000..dbdd44d
--- /dev/null
+++ b/bytecode/BytecodeKills.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include <wtf/FastBitVector.h>
+
+namespace JSC {
+
+class BytecodeLivenessAnalysis;
+
+class BytecodeKills {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    BytecodeKills()
+        : m_codeBlock(nullptr)
+    {
+    }
+    
+    // By convention, we say that non-local operands are never killed.
+    bool operandIsKilled(unsigned bytecodeIndex, int operand) const
+    {
+        ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+        VirtualRegister reg(operand);
+        if (reg.isLocal())
+            return m_killSets[bytecodeIndex].contains(operand);
+        return false;
+    }
+    
+    bool operandIsKilled(Instruction* instruction, int operand) const
+    {
+        return operandIsKilled(instruction - m_codeBlock->instructions().begin(), operand);
+    }
+    
+    template<typename Functor>
+    void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const
+    {
+        ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size());
+        m_killSets[bytecodeIndex].forEachLocal(
+            [&] (unsigned local) {
+                functor(virtualRegisterForLocal(local));
+            });
+    }
+    
+    template<typename Functor>
+    void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const
+    {
+        forEachOperandKilledAt(pc - m_codeBlock->instructions().begin(), functor);
+    }
+    
+private:
+    friend class BytecodeLivenessAnalysis;
+
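+    // KillSet is a space-saving representation of "the locals killed at one bytecode offset": m_word is 0
+    // when the set is empty, holds (local << 1) | 1 when it contains exactly one local, and otherwise stores
+    // a pointer to a heap-allocated Vector of locals (pointers are aligned, so their low bit is never set).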
+    class KillSet {
+    public:
+        KillSet()
+            : m_word(0)
+        {
+        }
+
+        ~KillSet()
+        {
+            if (hasVector())
+                delete vector();
+        }
+        
+        void add(unsigned local)
+        {
+            if (isEmpty()) {
+                setOneItem(local);
+                return;
+            }
+            if (hasOneItem()) {
+                ASSERT(oneItem() != local);
+                Vector<unsigned>* vector = new Vector<unsigned>();
+                vector->append(oneItem());
+                vector->append(local);
+                setVector(vector);
+                return;
+            }
+            ASSERT(!vector()->contains(local));
+            vector()->append(local);
+        }
+        
+        template<typename Functor>
+        void forEachLocal(const Functor& functor)
+        {
+            if (isEmpty())
+                return;
+            if (hasOneItem()) {
+                functor(oneItem());
+                return;
+            }
+            for (unsigned local : *vector())
+                functor(local);
+        }
+        
+        bool contains(unsigned expectedLocal)
+        {
+            if (isEmpty())
+                return false;
+            if (hasOneItem())
+                return oneItem() == expectedLocal;
+            for (unsigned local : *vector()) {
+                if (local == expectedLocal)
+                    return true;
+            }
+            return false;
+        }
+        
+    private:
+        bool isEmpty() const
+        {
+            return !m_word;
+        }
+        
+        bool hasOneItem() const
+        {
+            return m_word & 1;
+        }
+        
+        unsigned oneItem() const
+        {
+            return m_word >> 1;
+        }
+        
+        void setOneItem(unsigned value)
+        {
+            m_word = (value << 1) | 1;
+        }
+        
+        bool hasVector() const
+        {
+            return !isEmpty() && !hasOneItem();
+        }
+        
+        Vector<unsigned>* vector()
+        {
+            return bitwise_cast<Vector<unsigned>*>(m_word);
+        }
+        
+        void setVector(Vector<unsigned>* value)
+        {
+            m_word = bitwise_cast<uintptr_t>(value);
+        }
+        
+        uintptr_t m_word;
+    };
+    
+    CodeBlock* m_codeBlock;
+    std::unique_ptr<KillSet[]> m_killSets;
+};
+
+} // namespace JSC
diff --git a/bytecode/BytecodeList.json b/bytecode/BytecodeList.json
new file mode 100644
index 0000000..fafded7
--- /dev/null
+++ b/bytecode/BytecodeList.json
@@ -0,0 +1,200 @@
+[
+    {
+        "section" : "Bytecodes", "emitInHFile" : true, "emitInASMFile" : true, 
+        "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_", 
+        "bytecodes" : [
+            { "name" : "op_enter", "length" : 1 },
+            { "name" : "op_get_scope", "length" : 2 },
+            { "name" : "op_create_direct_arguments", "length" : 2 },
+            { "name" : "op_create_scoped_arguments", "length" : 3 },
+            { "name" : "op_create_cloned_arguments", "length" : 2 },
+            { "name" : "op_create_this", "length" : 5 },
+            { "name" : "op_get_argument", "length" : 4 },
+            { "name" : "op_argument_count", "length" : 2 },
+            { "name" : "op_to_this", "length" : 4 },
+            { "name" : "op_check_tdz", "length" : 2 },
+            { "name" : "op_new_object", "length" : 4 },
+            { "name" : "op_new_array", "length" : 5 },
+            { "name" : "op_new_array_with_size", "length" : 4 },
+            { "name" : "op_new_array_with_spread", "length" : 5 },
+            { "name" : "op_spread", "length" : 3 },
+            { "name" : "op_new_array_buffer", "length" : 5 },
+            { "name" : "op_new_regexp", "length" : 3 },
+            { "name" : "op_mov", "length" : 3 },
+            { "name" : "op_not", "length" : 3 },
+            { "name" : "op_eq", "length" : 4 },
+            { "name" : "op_eq_null", "length" : 3 },
+            { "name" : "op_neq", "length" : 4 },
+            { "name" : "op_neq_null", "length" : 3 },
+            { "name" : "op_stricteq", "length" : 4 },
+            { "name" : "op_nstricteq", "length" : 4 },
+            { "name" : "op_less", "length" : 4 },
+            { "name" : "op_lesseq", "length" : 4 },
+            { "name" : "op_greater", "length" : 4 },
+            { "name" : "op_greatereq", "length" : 4 },
+            { "name" : "op_inc", "length" : 2 },
+            { "name" : "op_dec", "length" : 2 },
+            { "name" : "op_to_number", "length" : 4 },
+            { "name" : "op_to_string", "length" : 3 },
+            { "name" : "op_negate", "length" : 4 },
+            { "name" : "op_add", "length" : 5 },
+            { "name" : "op_mul", "length" : 5 },
+            { "name" : "op_div", "length" : 5 },
+            { "name" : "op_mod", "length" : 4 },
+            { "name" : "op_sub", "length" : 5 },
+            { "name" : "op_pow", "length" : 4 },
+            { "name" : "op_lshift", "length" : 4 },
+            { "name" : "op_rshift", "length" : 4 },
+            { "name" : "op_urshift", "length" : 4 },
+            { "name" : "op_unsigned", "length" : 3 },
+            { "name" : "op_bitand", "length" : 5 },
+            { "name" : "op_bitxor", "length" : 5 },
+            { "name" : "op_bitor", "length" : 5 },
+            { "name" : "op_overrides_has_instance", "length" : 4 },
+            { "name" : "op_instanceof", "length" : 4 },
+            { "name" : "op_instanceof_custom", "length" : 5 },
+            { "name" : "op_typeof", "length" : 3 },
+            { "name" : "op_is_empty", "length" : 3 },
+            { "name" : "op_is_undefined", "length" : 3 },
+            { "name" : "op_is_boolean", "length" : 3 },
+            { "name" : "op_is_number", "length" : 3 },
+            { "name" : "op_is_object", "length" : 3 },
+            { "name" : "op_is_object_or_null", "length" : 3 },
+            { "name" : "op_is_function", "length" : 3 },
+            { "name" : "op_is_cell_with_type", "length" : 4 },
+            { "name" : "op_in", "length" : 4 },
+            { "name" : "op_get_array_length", "length" : 9 },
+            { "name" : "op_get_by_id", "length" : 9  },
+            { "name" : "op_get_by_id_proto_load", "length" : 9 },
+            { "name" : "op_get_by_id_unset", "length" : 9 },
+            { "name" : "op_get_by_id_with_this", "length" : 6 },
+            { "name" : "op_get_by_val_with_this", "length" : 6 },
+            { "name" : "op_try_get_by_id", "length" : 5 },
+            { "name" : "op_put_by_id", "length" : 9 },
+            { "name" : "op_put_by_id_with_this", "length" : 5 },
+            { "name" : "op_del_by_id", "length" : 4 },
+            { "name" : "op_get_by_val", "length" : 6 },
+            { "name" : "op_put_by_val", "length" : 5 },
+            { "name" : "op_put_by_val_with_this", "length" : 5 },
+            { "name" : "op_put_by_val_direct", "length" : 5 },
+            { "name" : "op_del_by_val", "length" : 4 },
+            { "name" : "op_put_by_index", "length" : 4 },
+            { "name" : "op_put_getter_by_id", "length" : 5 },
+            { "name" : "op_put_setter_by_id", "length" : 5 },
+            { "name" : "op_put_getter_setter_by_id", "length" : 6 },
+            { "name" : "op_put_getter_by_val", "length" : 5 },
+            { "name" : "op_put_setter_by_val", "length" : 5 },
+            { "name" : "op_define_data_property", "length" : 5 },
+            { "name" : "op_define_accessor_property", "length" : 6 },
+            { "name" : "op_jmp", "length" : 2 },
+            { "name" : "op_jtrue", "length" : 3 },
+            { "name" : "op_jfalse", "length" : 3 },
+            { "name" : "op_jeq_null", "length" : 3 },
+            { "name" : "op_jneq_null", "length" : 3 },
+            { "name" : "op_jneq_ptr", "length" : 5 },
+            { "name" : "op_jless", "length" : 4 },
+            { "name" : "op_jlesseq", "length" : 4 },
+            { "name" : "op_jgreater", "length" : 4 },
+            { "name" : "op_jgreatereq", "length" : 4 },
+            { "name" : "op_jnless", "length" : 4 },
+            { "name" : "op_jnlesseq", "length" : 4 },
+            { "name" : "op_jngreater", "length" : 4 },
+            { "name" : "op_jngreatereq", "length" : 4 },
+            { "name" : "op_loop_hint", "length" : 1 },
+            { "name" : "op_switch_imm", "length" : 4 },
+            { "name" : "op_switch_char", "length" : 4 },
+            { "name" : "op_switch_string", "length" : 4 },
+            { "name" : "op_new_func", "length" : 4 },
+            { "name" : "op_new_func_exp", "length" : 4 },
+            { "name" : "op_new_generator_func", "length" : 4 },
+            { "name" : "op_new_generator_func_exp", "length" : 4 },
+            { "name" : "op_new_async_func", "length" : 4 },
+            { "name" : "op_new_async_func_exp", "length" : 4 },
+            { "name" : "op_set_function_name", "length" : 3 },
+            { "name" : "op_call", "length" : 9 },
+            { "name" : "op_tail_call", "length" : 9 },
+            { "name" : "op_call_eval", "length" : 9 },
+            { "name" : "op_call_varargs", "length" : 9 },
+            { "name" : "op_tail_call_varargs", "length" : 9 },
+            { "name" : "op_tail_call_forward_arguments", "length" : 9 },
+            { "name" : "op_ret", "length" : 2 },
+            { "name" : "op_construct", "length" : 9 },
+            { "name" : "op_construct_varargs", "length" : 9 },
+            { "name" : "op_strcat", "length" : 4 },
+            { "name" : "op_to_primitive", "length" : 3 },
+            { "name" : "op_resolve_scope", "length" : 7 },
+            { "name" : "op_get_from_scope", "length" : 8 },
+            { "name" : "op_put_to_scope", "length" : 7 },
+            { "name" : "op_get_from_arguments", "length" : 5 },
+            { "name" : "op_put_to_arguments", "length" : 4 },
+            { "name" : "op_push_with_scope", "length" : 4 },
+            { "name" : "op_create_lexical_environment", "length" : 5 },
+            { "name" : "op_get_parent_scope", "length" : 3 },
+            { "name" : "op_catch", "length" : 3 },
+            { "name" : "op_throw", "length" : 2 },
+            { "name" : "op_throw_static_error", "length" : 3 },
+            { "name" : "op_debug", "length" : 3 },
+            { "name" : "op_end", "length" : 2 },
+            { "name" : "op_profile_type", "length" : 6 },
+            { "name" : "op_profile_control_flow", "length" : 2 },
+            { "name" : "op_get_enumerable_length", "length" : 3 },
+            { "name" : "op_has_indexed_property", "length" : 5 },
+            { "name" : "op_has_structure_property", "length" : 5 },
+            { "name" : "op_has_generic_property", "length" : 4 },
+            { "name" : "op_get_direct_pname", "length" : 7 },
+            { "name" : "op_get_property_enumerator", "length" : 3 },
+            { "name" : "op_enumerator_structure_pname", "length" : 4 },
+            { "name" : "op_enumerator_generic_pname", "length" : 4 },
+            { "name" : "op_to_index_string", "length" : 3 },
+            { "name" : "op_assert", "length" : 3 },
+            { "name" : "op_create_rest", "length": 4 },
+            { "name" : "op_get_rest_length", "length": 3 },
+            { "name" : "op_yield", "length" : 4 },
+            { "name" : "op_watchdog", "length" : 1 },
+            { "name" : "op_log_shadow_chicken_prologue", "length" : 2},
+            { "name" : "op_log_shadow_chicken_tail", "length" : 3}
+        ]
+    },
+    {
+        "section" : "CLoopHelpers", "emitInHFile" : true, "emitInASMFile" : false, "defaultLength" : 1,
+        "macroNameComponent" : "CLOOP_BYTECODE_HELPER",
+        "bytecodes" : [
+            { "name" : "llint_entry" },
+            { "name" : "getHostCallReturnValue" },
+            { "name" : "llint_return_to_host" },
+            { "name" : "llint_vm_entry_to_javascript" },
+            { "name" : "llint_vm_entry_to_native" },
+            { "name" : "llint_cloop_did_return_from_js_1" },
+            { "name" : "llint_cloop_did_return_from_js_2" },
+            { "name" : "llint_cloop_did_return_from_js_3" },
+            { "name" : "llint_cloop_did_return_from_js_4" },
+            { "name" : "llint_cloop_did_return_from_js_5" },
+            { "name" : "llint_cloop_did_return_from_js_6" },
+            { "name" : "llint_cloop_did_return_from_js_7" },
+            { "name" : "llint_cloop_did_return_from_js_8" },
+            { "name" : "llint_cloop_did_return_from_js_9" },
+            { "name" : "llint_cloop_did_return_from_js_10" },
+            { "name" : "llint_cloop_did_return_from_js_11" },
+            { "name" : "llint_cloop_did_return_from_js_12" }
+        ]
+    },
+    {
+        "section" : "NativeHelpers", "emitInHFile" : true, "emitInASMFile" : true, "defaultLength" : 1,
+        "macroNameComponent" : "BYTECODE_HELPER",
+        "bytecodes" : [
+            { "name" : "llint_program_prologue" },
+            { "name" : "llint_eval_prologue" },
+            { "name" : "llint_module_program_prologue" },
+            { "name" : "llint_function_for_call_prologue" },
+            { "name" : "llint_function_for_construct_prologue" },
+            { "name" : "llint_function_for_call_arity_check" },
+            { "name" : "llint_function_for_construct_arity_check" },
+            { "name" : "llint_generic_return_point" },
+            { "name" : "llint_throw_from_slow_path_trampoline" },
+            { "name" : "llint_throw_during_call_trampoline" },
+            { "name" : "llint_native_call_trampoline" },
+            { "name" : "llint_native_construct_trampoline" },
+            { "name" : "handleUncaughtException" }
+        ]
+    }
+]
diff --git a/bytecode/BytecodeLivenessAnalysis.cpp b/bytecode/BytecodeLivenessAnalysis.cpp
new file mode 100644
index 0000000..60eeb71
--- /dev/null
+++ b/bytecode/BytecodeLivenessAnalysis.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeLivenessAnalysis.h"
+
+#include "BytecodeKills.h"
+#include "BytecodeLivenessAnalysisInlines.h"
+#include "BytecodeUseDef.h"
+#include "CodeBlock.h"
+#include "FullBytecodeLiveness.h"
+#include "HeapInlines.h"
+#include "InterpreterInlines.h"
+#include "PreciseJumpTargets.h"
+
+namespace JSC {
+
+BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock)
+    : m_graph(codeBlock, codeBlock->instructions())
+{
+    compute();
+}
+
+template<typename Functor>
+void BytecodeLivenessAnalysis::computeDefsForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
+{
+    JSC::computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+}
+
+template<typename Functor>
+void BytecodeLivenessAnalysis::computeUsesForBytecodeOffset(CodeBlock* codeBlock, OpcodeID opcodeID, Instruction* instruction, FastBitVector&, const Functor& functor)
+{
+    JSC::computeUsesForBytecodeOffset(codeBlock, opcodeID, instruction, functor);
+}
+
+void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result)
+{
+    BytecodeBasicBlock* block = m_graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
+    ASSERT(block);
+    ASSERT(!block->isEntryBlock());
+    ASSERT(!block->isExitBlock());
+    result.resize(block->out().numBits());
+    computeLocalLivenessForBytecodeOffset(m_graph, block, bytecodeOffset, result);
+}
+
+bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset)
+{
+    if (operandIsAlwaysLive(operand))
+        return true;
+    FastBitVector result;
+    getLivenessInfoAtBytecodeOffset(bytecodeOffset, result);
+    return operandThatIsNotAlwaysLiveIsLive(result, operand);
+}
+
+FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
+{
+    FastBitVector out;
+    getLivenessInfoAtBytecodeOffset(bytecodeOffset, out);
+    return out;
+}
+
+void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result)
+{
+    FastBitVector out;
+    CodeBlock* codeBlock = m_graph.codeBlock();
+    
+    result.m_map.resize(codeBlock->instructions().size());
+    
+    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
+        if (block->isEntryBlock() || block->isExitBlock())
+            continue;
+        
+        out = block->out();
+        
+        for (unsigned i = block->offsets().size(); i--;) {
+            unsigned bytecodeOffset = block->offsets()[i];
+            stepOverInstruction(m_graph, bytecodeOffset, out);
+            result.m_map[bytecodeOffset] = out;
+        }
+    }
+}
+
+void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
+{
+    FastBitVector out;
+    
+    CodeBlock* codeBlock = m_graph.codeBlock();
+    result.m_codeBlock = codeBlock;
+    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
+    
+    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
+        if (block->isEntryBlock() || block->isExitBlock())
+            continue;
+        
+        out = block->out();
+        
+        for (unsigned i = block->offsets().size(); i--;) {
+            unsigned bytecodeOffset = block->offsets()[i];
+            stepOverInstruction(
+                m_graph, bytecodeOffset, out,
+                [&] (unsigned index) {
+                    // This is for uses.
+                    if (out[index])
+                        return;
+                    result.m_killSets[bytecodeOffset].add(index);
+                    out[index] = true;
+                },
+                [&] (unsigned index) {
+                    // This is for defs.
+                    out[index] = false;
+                });
+        }
+    }
+}
+
+void BytecodeLivenessAnalysis::dumpResults()
+{
+    CodeBlock* codeBlock = m_graph.codeBlock();
+    dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n");
+    Interpreter* interpreter = codeBlock->vm()->interpreter;
+    Instruction* instructionsBegin = codeBlock->instructions().begin();
+    unsigned i = 0;
+
+    unsigned numberOfBlocks = m_graph.size();
+    Vector<FastBitVector> predecessors(numberOfBlocks);
+    for (BytecodeBasicBlock* block : m_graph)
+        predecessors[block->index()].resize(numberOfBlocks);
+    for (BytecodeBasicBlock* block : m_graph) {
+        for (unsigned j = 0; j < block->successors().size(); j++) {
+            unsigned blockIndex = block->index();
+            unsigned successorIndex = block->successors()[j]->index();
+            predecessors[successorIndex][blockIndex] = true;
+        }
+    }
+
+    auto dumpBitVector = [] (FastBitVector& bits) {
+        for (unsigned j = 0; j < bits.numBits(); j++) {
+            if (bits[j])
+                dataLogF(" %u", j);
+        }
+    };
+
+    for (BytecodeBasicBlock* block : m_graph) {
+        dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i++, block, block->leaderOffset(), block->totalLength());
+
+        dataLogF("Predecessors:");
+        dumpBitVector(predecessors[block->index()]);
+        dataLogF("\n");
+
+        dataLogF("Successors:");
+        FastBitVector successors;
+        successors.resize(numberOfBlocks);
+        for (unsigned j = 0; j < block->successors().size(); j++) {
+            BytecodeBasicBlock* successor = block->successors()[j];
+            successors[successor->index()] = true;
+        }
+        dumpBitVector(successors); // Dump in sorted order.
+        dataLogF("\n");
+
+        if (block->isEntryBlock()) {
+            dataLogF("Entry block %p\n", block);
+            continue;
+        }
+        if (block->isExitBlock()) {
+            dataLogF("Exit block: %p\n", block);
+            continue;
+        }
+        for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
+            const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];
+
+            dataLogF("Live variables:");
+            FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset);
+            dumpBitVector(liveBefore);
+            dataLogF("\n");
+            codeBlock->dumpBytecode(WTF::dataFile(), codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction);
+
+            OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);
+            unsigned opcodeLength = opcodeLengths[opcodeID];
+            bytecodeOffset += opcodeLength;
+        }
+
+        dataLogF("Live variables:");
+        FastBitVector liveAfter = block->out();
+        dumpBitVector(liveAfter);
+        dataLogF("\n");
+    }
+}
+
+void BytecodeLivenessAnalysis::compute()
+{
+    runLivenessFixpoint(m_graph);
+
+    if (Options::dumpBytecodeLivenessResults())
+        dumpResults();
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeLivenessAnalysis.h b/bytecode/BytecodeLivenessAnalysis.h
new file mode 100644
index 0000000..e12cd8e
--- /dev/null
+++ b/bytecode/BytecodeLivenessAnalysis.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeBasicBlock.h"
+#include "BytecodeGraph.h"
+#include "CodeBlock.h"
+#include <wtf/FastBitVector.h>
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class BytecodeKills;
+class FullBytecodeLiveness;
+
+template<typename DerivedAnalysis>
+class BytecodeLivenessPropagation {
+protected:
+    template<typename Graph, typename UseFunctor, typename DefFunctor> void stepOverInstruction(Graph&, unsigned bytecodeOffset, FastBitVector& out, const UseFunctor&, const DefFunctor&);
+
+    template<typename Graph> void stepOverInstruction(Graph&, unsigned bytecodeOffset, FastBitVector& out);
+
+    template<typename Graph> bool computeLocalLivenessForBytecodeOffset(Graph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result);
+
+    template<typename Graph> bool computeLocalLivenessForBlock(Graph&, BytecodeBasicBlock*);
+
+    template<typename Graph> FastBitVector getLivenessInfoAtBytecodeOffset(Graph&, unsigned bytecodeOffset);
+
+    template<typename Graph> void runLivenessFixpoint(Graph&);
+};
+
+class BytecodeLivenessAnalysis : private BytecodeLivenessPropagation<BytecodeLivenessAnalysis> {
+    WTF_MAKE_FAST_ALLOCATED;
+    WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis);
+public:
+    friend class BytecodeLivenessPropagation<BytecodeLivenessAnalysis>;
+    BytecodeLivenessAnalysis(CodeBlock*);
+    
+    bool operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset);
+    FastBitVector getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset);
+    
+    void computeFullLiveness(FullBytecodeLiveness& result);
+    void computeKills(BytecodeKills& result);
+
+private:
+    void compute();
+    void dumpResults();
+
+    void getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&);
+
+    template<typename Functor> void computeDefsForBytecodeOffset(CodeBlock*, OpcodeID, Instruction*, FastBitVector&, const Functor&);
+    template<typename Functor> void computeUsesForBytecodeOffset(CodeBlock*, OpcodeID, Instruction*, FastBitVector&, const Functor&);
+
+    BytecodeGraph m_graph;
+};
+
+inline bool operandIsAlwaysLive(int operand);
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand);
+inline bool operandIsLive(const FastBitVector& out, int operand);
+inline bool isValidRegisterForLiveness(int operand);
+
+} // namespace JSC
diff --git a/bytecode/BytecodeLivenessAnalysisInlines.h b/bytecode/BytecodeLivenessAnalysisInlines.h
new file mode 100644
index 0000000..3371237
--- /dev/null
+++ b/bytecode/BytecodeLivenessAnalysisInlines.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeGraph.h"
+#include "BytecodeLivenessAnalysis.h"
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "Operations.h"
+
+namespace JSC {
+
+inline bool operandIsAlwaysLive(int operand)
+{
+    return !VirtualRegister(operand).isLocal();
+}
+
+inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand)
+{
+    unsigned local = VirtualRegister(operand).toLocal();
+    if (local >= out.numBits())
+        return false;
+    return out[local];
+}
+
+inline bool operandIsLive(const FastBitVector& out, int operand)
+{
+    return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand);
+}
+
+inline bool isValidRegisterForLiveness(int operand)
+{
+    VirtualRegister virtualReg(operand);
+    if (virtualReg.isConstant())
+        return false;
+    return virtualReg.isLocal();
+}
+
+// Simplified interface to bytecode use/def, which determines defs first and then uses, and includes
+// exception handlers in the uses.
+template<typename DerivedAnalysis>
+template<typename Graph, typename UseFunctor, typename DefFunctor>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::stepOverInstruction(Graph& graph, unsigned bytecodeOffset, FastBitVector& out, const UseFunctor& use, const DefFunctor& def)
+{
+    // This abstractly executes the instruction in reverse. Instructions logically first use operands and
+    // then define operands. This logical ordering is necessary for operations that use and def the same
+    // operand, like:
+    //
+    //     op_add loc1, loc1, loc2
+    //
+    // The use of loc1 happens before the def of loc1. That's a semantic requirement since the add
+    // operation cannot travel forward in time to read the value that it will produce after reading that
+    // value. Since we are executing in reverse, this means that we must do defs before uses (reverse of
+    // uses before defs).
+    //
+    // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
+    // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
+    // first add it to the out set (the use), and then we'd remove it (the def).
+
+    auto* codeBlock = graph.codeBlock();
+    Interpreter* interpreter = codeBlock->vm()->interpreter;
+    auto* instructionsBegin = graph.instructions().begin();
+    auto* instruction = &instructionsBegin[bytecodeOffset];
+    OpcodeID opcodeID = interpreter->getOpcodeID(*instruction);
+
+    static_cast<DerivedAnalysis*>(this)->computeDefsForBytecodeOffset(
+        codeBlock, opcodeID, instruction, out,
+        [&] (typename Graph::CodeBlock*, typename Graph::Instruction*, OpcodeID, int operand) {
+            if (isValidRegisterForLiveness(operand))
+                def(VirtualRegister(operand).toLocal());
+        });
+
+    static_cast<DerivedAnalysis*>(this)->computeUsesForBytecodeOffset(
+        codeBlock, opcodeID, instruction, out,
+        [&] (typename Graph::CodeBlock*, typename Graph::Instruction*, OpcodeID, int operand) {
+            if (isValidRegisterForLiveness(operand))
+                use(VirtualRegister(operand).toLocal());
+        });
+
+    // If we have an exception handler, we want the live-in variables of the 
+    // exception handler block to be included in the live-in of this particular bytecode.
+    if (auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
+        BytecodeBasicBlock* handlerBlock = graph.findBasicBlockWithLeaderOffset(handler->target);
+        ASSERT(handlerBlock);
+        handlerBlock->in().forEachSetBit(use);
+    }
+}
+
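+// The default functors below treat a use as setting a bit in the live-out vector and a def as clearing it.
+// For example, stepping in reverse over "op_add loc1, loc1, loc2" first clears loc1 (the def) and then sets
+// loc1 and loc2 (the uses), so both source operands end up live before the add.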
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::stepOverInstruction(Graph& graph, unsigned bytecodeOffset, FastBitVector& out)
+{
+    stepOverInstruction(
+        graph, bytecodeOffset, out,
+        [&] (unsigned bitIndex) {
+            // This is the use functor, so we set the bit.
+            out[bitIndex] = true;
+        },
+        [&] (unsigned bitIndex) {
+            // This is the def functor, so we clear the bit.
+            out[bitIndex] = false;
+        });
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline bool BytecodeLivenessPropagation<DerivedAnalysis>::computeLocalLivenessForBytecodeOffset(Graph& graph, BytecodeBasicBlock* block, unsigned targetOffset, FastBitVector& result)
+{
+    ASSERT(!block->isExitBlock());
+    ASSERT(!block->isEntryBlock());
+
+    FastBitVector out = block->out();
+
+    for (int i = block->offsets().size() - 1; i >= 0; i--) {
+        unsigned bytecodeOffset = block->offsets()[i];
+        if (targetOffset > bytecodeOffset)
+            break;
+        stepOverInstruction(graph, bytecodeOffset, out);
+    }
+
+    return result.setAndCheck(out);
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline bool BytecodeLivenessPropagation<DerivedAnalysis>::computeLocalLivenessForBlock(Graph& graph, BytecodeBasicBlock* block)
+{
+    if (block->isExitBlock() || block->isEntryBlock())
+        return false;
+    return computeLocalLivenessForBytecodeOffset(graph, block, block->leaderOffset(), block->in());
+}
+
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline FastBitVector BytecodeLivenessPropagation<DerivedAnalysis>::getLivenessInfoAtBytecodeOffset(Graph& graph, unsigned bytecodeOffset)
+{
+    BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(bytecodeOffset);
+    ASSERT(block);
+    ASSERT(!block->isEntryBlock());
+    ASSERT(!block->isExitBlock());
+    FastBitVector out;
+    out.resize(block->out().numBits());
+    computeLocalLivenessForBytecodeOffset(graph, block, bytecodeOffset, out);
+    return out;
+}
+
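+// Standard backward dataflow fixpoint: a block's out-set is the union of its successors' in-sets, its in-set
+// is recomputed by stepping backward over its instructions, and iteration repeats until no in-set changes.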
+template<typename DerivedAnalysis>
+template<typename Graph>
+inline void BytecodeLivenessPropagation<DerivedAnalysis>::runLivenessFixpoint(Graph& graph)
+{
+    auto* codeBlock = graph.codeBlock();
+    unsigned numberOfVariables = codeBlock->numCalleeLocals();
+    for (BytecodeBasicBlock* block : graph) {
+        block->in().resize(numberOfVariables);
+        block->out().resize(numberOfVariables);
+        block->in().clearAll();
+        block->out().clearAll();
+    }
+
+    bool changed;
+    BytecodeBasicBlock* lastBlock = graph.last();
+    lastBlock->in().clearAll();
+    lastBlock->out().clearAll();
+    FastBitVector newOut;
+    newOut.resize(lastBlock->out().numBits());
+    do {
+        changed = false;
+        for (std::unique_ptr<BytecodeBasicBlock>& block : graph.basicBlocksInReverseOrder()) {
+            newOut.clearAll();
+            for (BytecodeBasicBlock* successor : block->successors())
+                newOut |= successor->in();
+            block->out() = newOut;
+            changed |= computeLocalLivenessForBlock(graph, block.get());
+        }
+    } while (changed);
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeRewriter.cpp b/bytecode/BytecodeRewriter.cpp
new file mode 100644
index 0000000..6dadb6e
--- /dev/null
+++ b/bytecode/BytecodeRewriter.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeRewriter.h"
+
+#include "HeapInlines.h"
+#include "PreciseJumpTargetsInlines.h"
+#include <wtf/BubbleSort.h>
+
+namespace JSC {
+
+void BytecodeRewriter::applyModification()
+{
+    for (size_t insertionIndex = m_insertions.size(); insertionIndex--;) {
+        Insertion& insertion = m_insertions[insertionIndex];
+        if (insertion.type == Insertion::Type::Remove)
+            m_graph.instructions().remove(insertion.index.bytecodeOffset, insertion.length());
+        else {
+            if (insertion.includeBranch == IncludeBranch::Yes) {
+                int finalOffset = insertion.index.bytecodeOffset + calculateDifference(m_insertions.begin(), m_insertions.begin() + insertionIndex);
+                adjustJumpTargetsInFragment(finalOffset, insertion);
+            }
+            m_graph.instructions().insertVector(insertion.index.bytecodeOffset, insertion.instructions);
+        }
+    }
+    m_insertions.clear();
+}
+
+void BytecodeRewriter::execute()
+{
+    WTF::bubbleSort(m_insertions.begin(), m_insertions.end(), [] (const Insertion& lhs, const Insertion& rhs) {
+        return lhs.index < rhs.index;
+    });
+
+    UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+    codeBlock->applyModification(*this);
+}
+
+void BytecodeRewriter::adjustJumpTargetsInFragment(unsigned finalOffset, Insertion& insertion)
+{
+    auto& fragment = insertion.instructions;
+    UnlinkedInstruction* instructionsBegin = fragment.data();
+    for (unsigned fragmentOffset = 0, fragmentCount = fragment.size(); fragmentOffset < fragmentCount;) {
+        UnlinkedInstruction& instruction = fragment[fragmentOffset];
+        OpcodeID opcodeID = instruction.u.opcode;
+        if (isBranch(opcodeID)) {
+            unsigned bytecodeOffset = finalOffset + fragmentOffset;
+            UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
+            extractStoredJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, fragmentOffset, [&](int32_t& label) {
+                int absoluteOffset = adjustAbsoluteOffset(label);
+                label = absoluteOffset - static_cast(bytecodeOffset);
+            });
+        }
+        fragmentOffset += opcodeLength(opcodeID);
+    }
+}
+
+void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, Vector<UnlinkedInstruction>&& fragment)
+{
+    ASSERT(insertionPoint.position == Position::Before || insertionPoint.position == Position::After);
+    m_insertions.append(Insertion {
+        insertionPoint,
+        Insertion::Type::Insert,
+        includeBranch,
+        0,
+        WTFMove(fragment)
+    });
+}
+
+int BytecodeRewriter::adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint)
+{
+    if (startPoint < jumpTargetPoint) {
+        int jumpTarget = jumpTargetPoint.bytecodeOffset;
+        auto start = std::lower_bound(m_insertions.begin(), m_insertions.end(), startPoint, [&] (const Insertion& insertion, InsertionPoint startPoint) {
+            return insertion.index < startPoint;
+        });
+        if (start != m_insertions.end()) {
+            auto end = std::lower_bound(m_insertions.begin(), m_insertions.end(), jumpTargetPoint, [&] (const Insertion& insertion, InsertionPoint jumpTargetPoint) {
+                return insertion.index < jumpTargetPoint;
+            });
+            jumpTarget += calculateDifference(start, end);
+        }
+        return jumpTarget - startPoint.bytecodeOffset;
+    }
+
+    if (startPoint == jumpTargetPoint)
+        return 0;
+
+    return -adjustJumpTarget(jumpTargetPoint, startPoint);
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeRewriter.h b/bytecode/BytecodeRewriter.h
new file mode 100644
index 0000000..035f900
--- /dev/null
+++ b/bytecode/BytecodeRewriter.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Yusuke Suzuki 
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeGraph.h"
+#include "Bytecodes.h"
+#include "Opcode.h"
+#include "UnlinkedCodeBlock.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// BytecodeRewriter offers the ability to insert and remove the bytecodes including jump operations.
+//
+// We use the original bytecode offsets as labels. When you emit some jumps, you can specify the jump target by
+// using the original bytecode offsets. These bytecode offsets are later converted appropriate values by the
+// rewriter. And we also use the labels to represents the position the new bytecodes inserted.
+//
+//                      |  [bytecode]  |  [bytecode]  |
+//   offsets            A              B              C
+//
+// We can use the above "A", "B", and "C" offsets as labels. And the rewriter has the ability to insert bytecode fragments
+// before and after the label. For example, if you insert the fragment after "B", the layout becomes like this.
+//
+//                      |  [bytecode]  |  [fragment]  [bytecode]  |
+//   offsets            A              B                          C
+//
+//  And even if you remove some original bytecodes, the offsets remain as labels. For example, when you remove A's bytecode,
+//  the layout becomes like this.
+//
+//                      |              |  [bytecode]  |
+//   offsets            A              B              C
+//
+//  And still you can insert fragments before and after "A".
+//
+//                      |  [fragment]  |  [bytecode]  |
+//   offsets            A              B              C
+//
+//   We can insert bytecode fragments "Before" and "After" the labels. This insertion position, either "Before" or "After",
+//   has an effect when the label is involved in jumps. For example, when you have a jump to the position "B",
+//
+//                      |  [bytecode]  |  [bytecode]  |
+//   offsets            A              B              C
+//                                     ^
+//                                     jump to here.
+//
+//  and you insert the bytecode before/after "B",
+//
+//                      |  [bytecode] [before]  |  [after] [bytecode]  |
+//   offsets            A                       B              C
+//                                              ^
+//                                              jump to here.
+//
+//  as you can see, the execution jumping into "B" does not execute [before] code.
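+//
+//  A minimal usage sketch (illustrative only, not part of the original change; names other than the rewriter
+//  API are assumed): a client builds a rewriter over a BytecodeGraph, emits fragments keyed to original
+//  offsets, and then executes the rewrite. Branch operands inside a fragment are original absolute offsets;
+//  applyModification() converts them to relative jumps.
+//
+//      BytecodeRewriter rewriter(graph);
+//      rewriter.insertFragmentBefore(someOriginalOffset, [&] (BytecodeRewriter::Fragment& fragment) {
+//          fragment.appendInstruction(op_jmp, someOriginalJumpTarget);
+//      });
+//      rewriter.execute();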
+class BytecodeRewriter {
+WTF_MAKE_NONCOPYABLE(BytecodeRewriter);
+public:
+    enum class Position : int8_t {
+        EntryPoint = -2,
+        Before = -1,
+        LabelPoint = 0,
+        After = 1,
+        OriginalBytecodePoint = 2,
+    };
+
+    enum class IncludeBranch : uint8_t {
+        No = 0,
+        Yes = 1,
+    };
+
+    struct InsertionPoint {
+        int bytecodeOffset;
+        Position position;
+
+        InsertionPoint(int offset, Position pos)
+            : bytecodeOffset(offset)
+            , position(pos)
+        {
+        }
+
+        bool operator<(const InsertionPoint& other) const
+        {
+            if (bytecodeOffset == other.bytecodeOffset)
+                return position < other.position;
+            return bytecodeOffset < other.bytecodeOffset;
+        }
+
+        bool operator==(const InsertionPoint& other) const
+        {
+            return bytecodeOffset == other.bytecodeOffset && position == other.position;
+        }
+    };
+
+private:
+    struct Insertion {
+        enum class Type : uint8_t { Insert = 0, Remove = 1, };
+
+        size_t length() const
+        {
+            if (type == Type::Remove)
+                return removeLength;
+            return instructions.size();
+        }
+
+        InsertionPoint index;
+        Type type;
+        IncludeBranch includeBranch;
+        size_t removeLength;
+        Vector<UnlinkedInstruction> instructions;
+    };
+
+public:
+    class Fragment {
+    WTF_MAKE_NONCOPYABLE(Fragment);
+    public:
+        Fragment(Vector<UnlinkedInstruction>& fragment, IncludeBranch& includeBranch)
+            : m_fragment(fragment)
+            , m_includeBranch(includeBranch)
+        {
+        }
+
+        template<class... Args>
+        void appendInstruction(OpcodeID opcodeID, Args... args)
+        {
+            if (isBranch(opcodeID))
+                m_includeBranch = IncludeBranch::Yes;
+
+            UnlinkedInstruction instructions[sizeof...(args) + 1] = {
+                UnlinkedInstruction(opcodeID),
+                UnlinkedInstruction(args)...
+            };
+            m_fragment.append(instructions, sizeof...(args) + 1);
+        }
+
+    private:
+        Vector<UnlinkedInstruction>& m_fragment;
+        IncludeBranch& m_includeBranch;
+    };
+
+    BytecodeRewriter(BytecodeGraph& graph)
+        : m_graph(graph)
+    {
+    }
+
+    template<class Function>
+    void insertFragmentBefore(unsigned bytecodeOffset, Function function)
+    {
+        IncludeBranch includeBranch = IncludeBranch::No;
+        Vector<UnlinkedInstruction> instructions;
+        Fragment fragment(instructions, includeBranch);
+        function(fragment);
+        insertImpl(InsertionPoint(bytecodeOffset, Position::Before), includeBranch, WTFMove(instructions));
+    }
+
+    template<class Function>
+    void insertFragmentAfter(unsigned bytecodeOffset, Function function)
+    {
+        IncludeBranch includeBranch = IncludeBranch::No;
+        Vector<UnlinkedInstruction> instructions;
+        Fragment fragment(instructions, includeBranch);
+        function(fragment);
+        insertImpl(InsertionPoint(bytecodeOffset, Position::After), includeBranch, WTFMove(instructions));
+    }
+
+    void removeBytecode(unsigned bytecodeOffset)
+    {
+        m_insertions.append(Insertion { InsertionPoint(bytecodeOffset, Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, opcodeLength(m_graph.instructions()[bytecodeOffset].u.opcode), { } });
+    }
+
+    void execute();
+
+    BytecodeGraph& graph() { return m_graph; }
+
+    int adjustAbsoluteOffset(int absoluteOffset)
+    {
+        return adjustJumpTarget(InsertionPoint(0, Position::EntryPoint), InsertionPoint(absoluteOffset, Position::LabelPoint));
+    }
+
+    int adjustJumpTarget(int originalBytecodeOffset, int originalJumpTarget)
+    {
+        return adjustJumpTarget(InsertionPoint(originalBytecodeOffset, Position::LabelPoint), InsertionPoint(originalJumpTarget, Position::LabelPoint));
+    }
+
+private:
+    void insertImpl(InsertionPoint, IncludeBranch, Vector<UnlinkedInstruction>&& fragment);
+
+    friend class UnlinkedCodeBlock;
+    void applyModification();
+    void adjustJumpTargetsInFragment(unsigned finalOffset, Insertion&);
+
+    int adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint);
+    template<typename Iterator> int calculateDifference(Iterator begin, Iterator end);
+
+    BytecodeGraph& m_graph;
+    Vector m_insertions;
+};
+
+template<typename Iterator>
+inline int BytecodeRewriter::calculateDifference(Iterator begin, Iterator end)
+{
+    int result = 0;
+    for (; begin != end; ++begin) {
+        if (begin->type == Insertion::Type::Remove)
+            result -= begin->length();
+        else
+            result += begin->length();
+    }
+    return result;
+}
+
+} // namespace JSC
diff --git a/bytecode/BytecodeUseDef.h b/bytecode/BytecodeUseDef.h
new file mode 100644
index 0000000..99b9394
--- /dev/null
+++ b/bytecode/BytecodeUseDef.h
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+
+namespace JSC {
+
+template<typename Block, typename Functor, typename Instruction>
+void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)
+{
+    if (opcodeID != op_enter && codeBlock->wasCompiledWithDebuggingOpcodes() && codeBlock->scopeRegister().isValid())
+        functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset());
+
+    switch (opcodeID) {
+    // No uses.
+    case op_new_regexp:
+    case op_new_array_buffer:
+    case op_throw_static_error:
+    case op_debug:
+    case op_jneq_ptr:
+    case op_loop_hint:
+    case op_jmp:
+    case op_new_object:
+    case op_enter:
+    case op_argument_count:
+    case op_catch:
+    case op_profile_control_flow:
+    case op_create_direct_arguments:
+    case op_create_cloned_arguments:
+    case op_get_rest_length:
+    case op_watchdog:
+    case op_get_argument:
+        return;
+    case op_assert:
+    case op_get_scope:
+    case op_to_this:
+    case op_check_tdz:
+    case op_profile_type:
+    case op_throw:
+    case op_end:
+    case op_ret:
+    case op_jtrue:
+    case op_jfalse:
+    case op_jeq_null:
+    case op_jneq_null:
+    case op_dec:
+    case op_inc:
+    case op_log_shadow_chicken_prologue: {
+        ASSERT(opcodeLengths[opcodeID] > 1);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        return;
+    }
+    case op_jlesseq:
+    case op_jgreater:
+    case op_jgreatereq:
+    case op_jnless:
+    case op_jnlesseq:
+    case op_jngreater:
+    case op_jngreatereq:
+    case op_jless:
+    case op_set_function_name:
+    case op_log_shadow_chicken_tail: {
+        ASSERT(opcodeLengths[opcodeID] > 2);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        return;
+    }
+    case op_put_by_val_direct:
+    case op_put_by_val: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        return;
+    }
+    case op_put_by_index:
+    case op_put_by_id:
+    case op_put_to_scope:
+    case op_put_to_arguments: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        return;
+    }
+    case op_put_by_id_with_this: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_put_by_val_with_this: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_put_getter_by_id:
+    case op_put_setter_by_id: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_put_getter_setter_by_id: {
+        ASSERT(opcodeLengths[opcodeID] > 5);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+        return;
+    }
+    case op_put_getter_by_val:
+    case op_put_setter_by_val: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_define_data_property: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_define_accessor_property: {
+        ASSERT(opcodeLengths[opcodeID] > 5);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+        return;
+    }
+    case op_spread:
+    case op_get_property_enumerator:
+    case op_get_enumerable_length:
+    case op_new_func_exp:
+    case op_new_generator_func_exp:
+    case op_new_async_func_exp:
+    case op_to_index_string:
+    case op_create_lexical_environment:
+    case op_resolve_scope:
+    case op_get_from_scope:
+    case op_to_primitive:
+    case op_try_get_by_id:
+    case op_get_by_id:
+    case op_get_by_id_proto_load:
+    case op_get_by_id_unset:
+    case op_get_array_length:
+    case op_typeof:
+    case op_is_empty:
+    case op_is_undefined:
+    case op_is_boolean:
+    case op_is_number:
+    case op_is_object:
+    case op_is_object_or_null:
+    case op_is_cell_with_type:
+    case op_is_function:
+    case op_to_number:
+    case op_to_string:
+    case op_negate:
+    case op_neq_null:
+    case op_eq_null:
+    case op_not:
+    case op_mov:
+    case op_new_array_with_size:
+    case op_create_this:
+    case op_del_by_id:
+    case op_unsigned:
+    case op_new_func:
+    case op_new_generator_func:
+    case op_new_async_func:
+    case op_get_parent_scope:
+    case op_create_scoped_arguments:
+    case op_create_rest:
+    case op_get_from_arguments: {
+        ASSERT(opcodeLengths[opcodeID] > 2);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        return;
+    }
+    case op_has_generic_property:
+    case op_has_indexed_property:
+    case op_enumerator_structure_pname:
+    case op_enumerator_generic_pname:
+    case op_get_by_val:
+    case op_in:
+    case op_overrides_has_instance:
+    case op_instanceof:
+    case op_add:
+    case op_mul:
+    case op_div:
+    case op_mod:
+    case op_sub:
+    case op_pow:
+    case op_lshift:
+    case op_rshift:
+    case op_urshift:
+    case op_bitand:
+    case op_bitxor:
+    case op_bitor:
+    case op_less:
+    case op_lesseq:
+    case op_greater:
+    case op_greatereq:
+    case op_nstricteq:
+    case op_stricteq:
+    case op_neq:
+    case op_eq:
+    case op_push_with_scope:
+    case op_get_by_id_with_this:
+    case op_del_by_val:
+    case op_tail_call_forward_arguments: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        return;
+    }
+    case op_get_by_val_with_this: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_instanceof_custom:
+    case op_has_structure_property:
+    case op_construct_varargs:
+    case op_call_varargs:
+    case op_tail_call_varargs: {
+        ASSERT(opcodeLengths[opcodeID] > 4);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        return;
+    }
+    case op_get_direct_pname: {
+        ASSERT(opcodeLengths[opcodeID] > 5);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[4].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[5].u.operand);
+        return;
+    }
+    case op_switch_string:
+    case op_switch_char:
+    case op_switch_imm: {
+        ASSERT(opcodeLengths[opcodeID] > 3);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        return;
+    }
+    case op_new_array_with_spread:
+    case op_new_array:
+    case op_strcat: {
+        int base = instruction[2].u.operand;
+        int count = instruction[3].u.operand;
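+        // The sources are 'count' consecutive registers counting down from 'base'.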
+        for (int i = 0; i < count; i++)
+            functor(codeBlock, instruction, opcodeID, base - i);
+        return;
+    }
+    case op_construct:
+    case op_call_eval:
+    case op_call:
+    case op_tail_call: {
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        int argCount = instruction[3].u.operand;
+        int registerOffset = -instruction[4].u.operand;
+        int lastArg = registerOffset + CallFrame::thisArgumentOffset();
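+        // The arguments (including 'this') occupy consecutive registers starting at the callee frame's 'this' slot.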
+        for (int i = 0; i < argCount; i++)
+            functor(codeBlock, instruction, opcodeID, lastArg + i);
+        if (opcodeID == op_call_eval)
+            functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset());
+        return;
+    }
+    case op_yield: {
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[3].u.operand);
+        return;
+    }
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+}
+
+template<typename Block, typename Instruction, typename Functor>
+void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)
+{
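+    // Report, via 'functor', every virtual register that this instruction writes (defines).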
+    switch (opcodeID) {
+    // These don't define anything.
+    case op_put_to_scope:
+    case op_end:
+    case op_throw:
+    case op_throw_static_error:
+    case op_assert:
+    case op_debug:
+    case op_ret:
+    case op_jmp:
+    case op_jtrue:
+    case op_jfalse:
+    case op_jeq_null:
+    case op_jneq_null:
+    case op_jneq_ptr:
+    case op_jless:
+    case op_jlesseq:
+    case op_jgreater:
+    case op_jgreatereq:
+    case op_jnless:
+    case op_jnlesseq:
+    case op_jngreater:
+    case op_jngreatereq:
+    case op_loop_hint:
+    case op_switch_imm:
+    case op_switch_char:
+    case op_switch_string:
+    case op_put_by_id:
+    case op_put_by_id_with_this:
+    case op_put_by_val_with_this:
+    case op_put_getter_by_id:
+    case op_put_setter_by_id:
+    case op_put_getter_setter_by_id:
+    case op_put_getter_by_val:
+    case op_put_setter_by_val:
+    case op_put_by_val:
+    case op_put_by_val_direct:
+    case op_put_by_index:
+    case op_define_data_property:
+    case op_define_accessor_property:
+    case op_profile_type:
+    case op_profile_control_flow:
+    case op_put_to_arguments:
+    case op_set_function_name:
+    case op_watchdog:
+    case op_log_shadow_chicken_prologue:
+    case op_log_shadow_chicken_tail:
+    case op_yield:
+#define LLINT_HELPER_OPCODES(opcode, length) case opcode:
+        FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES);
+#undef LLINT_HELPER_OPCODES
+        return;
+    // These all define a single destination: the first operand.
+    case op_argument_count:
+    case op_to_index_string:
+    case op_get_enumerable_length:
+    case op_has_indexed_property:
+    case op_has_structure_property:
+    case op_has_generic_property:
+    case op_get_direct_pname:
+    case op_get_property_enumerator:
+    case op_enumerator_structure_pname:
+    case op_enumerator_generic_pname:
+    case op_get_parent_scope:
+    case op_push_with_scope:
+    case op_create_lexical_environment:
+    case op_resolve_scope:
+    case op_strcat:
+    case op_to_primitive:
+    case op_create_this:
+    case op_new_array:
+    case op_new_array_with_spread:
+    case op_spread:
+    case op_new_array_buffer:
+    case op_new_array_with_size:
+    case op_new_regexp:
+    case op_new_func:
+    case op_new_func_exp:
+    case op_new_generator_func:
+    case op_new_generator_func_exp:
+    case op_new_async_func:
+    case op_new_async_func_exp:
+    case op_call_varargs:
+    case op_tail_call_varargs:
+    case op_tail_call_forward_arguments:
+    case op_construct_varargs:
+    case op_get_from_scope:
+    case op_call:
+    case op_tail_call:
+    case op_call_eval:
+    case op_construct:
+    case op_try_get_by_id:
+    case op_get_by_id:
+    case op_get_by_id_proto_load:
+    case op_get_by_id_unset:
+    case op_get_by_id_with_this:
+    case op_get_by_val_with_this:
+    case op_get_array_length:
+    case op_overrides_has_instance:
+    case op_instanceof:
+    case op_instanceof_custom:
+    case op_get_by_val:
+    case op_typeof:
+    case op_is_empty:
+    case op_is_undefined:
+    case op_is_boolean:
+    case op_is_number:
+    case op_is_object:
+    case op_is_object_or_null:
+    case op_is_cell_with_type:
+    case op_is_function:
+    case op_in:
+    case op_to_number:
+    case op_to_string:
+    case op_negate:
+    case op_add:
+    case op_mul:
+    case op_div:
+    case op_mod:
+    case op_sub:
+    case op_pow:
+    case op_lshift:
+    case op_rshift:
+    case op_urshift:
+    case op_bitand:
+    case op_bitxor:
+    case op_bitor:
+    case op_inc:
+    case op_dec:
+    case op_eq:
+    case op_neq:
+    case op_stricteq:
+    case op_nstricteq:
+    case op_less:
+    case op_lesseq:
+    case op_greater:
+    case op_greatereq:
+    case op_neq_null:
+    case op_eq_null:
+    case op_not:
+    case op_mov:
+    case op_new_object:
+    case op_to_this:
+    case op_check_tdz:
+    case op_get_scope:
+    case op_create_direct_arguments:
+    case op_create_scoped_arguments:
+    case op_create_cloned_arguments:
+    case op_del_by_id:
+    case op_del_by_val:
+    case op_unsigned:
+    case op_get_from_arguments: 
+    case op_get_argument:
+    case op_create_rest:
+    case op_get_rest_length: {
+        ASSERT(opcodeLengths[opcodeID] > 1);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        return;
+    }
+    case op_catch: {
+        ASSERT(opcodeLengths[opcodeID] > 2);
+        functor(codeBlock, instruction, opcodeID, instruction[1].u.operand);
+        functor(codeBlock, instruction, opcodeID, instruction[2].u.operand);
+        return;
+    }
+    case op_enter: {
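+        // op_enter initializes every local, so it defines all of them.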
+        for (unsigned i = codeBlock->m_numVars; i--;)
+            functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset());
+        return;
+    }
+    }
+}
+
+} // namespace JSC
diff --git a/bytecode/CallEdge.cpp b/bytecode/CallEdge.cpp
new file mode 100644
index 0000000..dffff6d
--- /dev/null
+++ b/bytecode/CallEdge.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallEdge.h"
+
+namespace JSC {
+
+void CallEdge::dump(PrintStream& out) const
+{
+    out.print("<", m_callee, ", count: ", m_count, ">");
+}
+
+} // namespace JSC
+
diff --git a/bytecode/CallEdge.h b/bytecode/CallEdge.h
new file mode 100644
index 0000000..8c7abbc
--- /dev/null
+++ b/bytecode/CallEdge.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallVariant.h"
+
+namespace JSC {
+
+class CallEdge {
+public:
+    CallEdge();
+    CallEdge(CallVariant, uint32_t);
+    
+    bool operator!() const { return !m_callee; }
+    
+    CallVariant callee() const { return m_callee; }
+    uint32_t count() const { return m_count; }
+    
+    CallEdge despecifiedClosure() const
+    {
+        return CallEdge(m_callee.despecifiedClosure(), m_count);
+    }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    CallVariant m_callee;
+    uint32_t m_count;
+};
+
+inline CallEdge::CallEdge(CallVariant callee, uint32_t count)
+    : m_callee(callee)
+    , m_count(count)
+{
+}
+
+inline CallEdge::CallEdge()
+    : CallEdge(CallVariant(), 0)
+{
+}
+
+typedef Vector<CallEdge> CallEdgeList;
+
+} // namespace JSC
diff --git a/bytecode/CallLinkInfo.cpp b/bytecode/CallLinkInfo.cpp
new file mode 100644
index 0000000..7ffda05
--- /dev/null
+++ b/bytecode/CallLinkInfo.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2012-2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallLinkInfo.h"
+
+#include "CallFrameShuffleData.h"
+#include "DFGOperations.h"
+#include "DFGThunks.h"
+#include "FunctionCodeBlock.h"
+#include "JSCInlines.h"
+#include "MacroAssembler.h"
+#include "Opcode.h"
+#include "Repatch.h"
+#include <wtf/ListDump.h>
+
+#if ENABLE(JIT)
+namespace JSC {
+
+CallLinkInfo::CallType CallLinkInfo::callTypeFor(OpcodeID opcodeID)
+{
+    if (opcodeID == op_call || opcodeID == op_call_eval)
+        return Call;
+    if (opcodeID == op_call_varargs)
+        return CallVarargs;
+    if (opcodeID == op_construct)
+        return Construct;
+    if (opcodeID == op_construct_varargs)
+        return ConstructVarargs;
+    if (opcodeID == op_tail_call)
+        return TailCall;
+    ASSERT(opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments);
+    return TailCallVarargs;
+}
+
+CallLinkInfo::CallLinkInfo()
+    : m_hasSeenShouldRepatch(false)
+    , m_hasSeenClosure(false)
+    , m_clearedByGC(false)
+    , m_allowStubs(true)
+    , m_isLinked(false)
+    , m_callType(None)
+    , m_calleeGPR(255)
+    , m_maxNumArguments(0)
+    , m_slowPathCount(0)
+{
+}
+
+CallLinkInfo::~CallLinkInfo()
+{
+    clearStub();
+    
+    if (isOnList())
+        remove();
+}
+
+void CallLinkInfo::clearStub()
+{
+    if (!stub())
+        return;
+
+    m_stub->clearCallNodesFor(this);
+    m_stub = nullptr;
+}
+
+void CallLinkInfo::unlink(VM& vm)
+{
+    // We could be called even if we're not linked anymore because of how polymorphic calls
+    // work. Each callsite within the polymorphic call stub may separately ask us to unlink().
+    if (isLinked())
+        unlinkFor(vm, *this);
+
+    // Either we were unlinked, in which case we should not have been on any list, or we unlinked
+    // ourselves so that we're not on any list anymore.
+    RELEASE_ASSERT(!isOnList());
+}
+
+CodeLocationNearCall CallLinkInfo::callReturnLocation()
+{
+    RELEASE_ASSERT(!isDirect());
+    return CodeLocationNearCall(m_callReturnLocationOrPatchableJump, Regular);
+}
+
+CodeLocationJump CallLinkInfo::patchableJump()
+{
+    RELEASE_ASSERT(callType() == DirectTailCall);
+    return CodeLocationJump(m_callReturnLocationOrPatchableJump);
+}
+
+CodeLocationDataLabelPtr CallLinkInfo::hotPathBegin()
+{
+    RELEASE_ASSERT(!isDirect());
+    return CodeLocationDataLabelPtr(m_hotPathBeginOrSlowPathStart);
+}
+
+CodeLocationLabel CallLinkInfo::slowPathStart()
+{
+    RELEASE_ASSERT(isDirect());
+    return m_hotPathBeginOrSlowPathStart;
+}
+
+void CallLinkInfo::setCallee(VM& vm, JSCell* owner, JSFunction* callee)
+{
+    RELEASE_ASSERT(!isDirect());
+    MacroAssembler::repatchPointer(hotPathBegin(), callee);
+    m_calleeOrCodeBlock.set(vm, owner, callee);
+    m_isLinked = true;
+}
+
+void CallLinkInfo::clearCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    MacroAssembler::repatchPointer(hotPathBegin(), nullptr);
+    m_calleeOrCodeBlock.clear();
+    m_isLinked = false;
+}
+
+JSFunction* CallLinkInfo::callee()
+{
+    RELEASE_ASSERT(!isDirect());
+    return jsCast<JSFunction*>(m_calleeOrCodeBlock.get());
+}
+
+void CallLinkInfo::setCodeBlock(VM& vm, JSCell* owner, FunctionCodeBlock* codeBlock)
+{
+    RELEASE_ASSERT(isDirect());
+    m_calleeOrCodeBlock.setMayBeNull(vm, owner, codeBlock);
+    m_isLinked = true;
+}
+
+void CallLinkInfo::clearCodeBlock()
+{
+    RELEASE_ASSERT(isDirect());
+    m_calleeOrCodeBlock.clear();
+    m_isLinked = false;
+}
+
+FunctionCodeBlock* CallLinkInfo::codeBlock()
+{
+    RELEASE_ASSERT(isDirect());
+    return jsCast<FunctionCodeBlock*>(m_calleeOrCodeBlock.get());
+}
+
+void CallLinkInfo::setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee)
+{
+    RELEASE_ASSERT(!isDirect());
+    m_lastSeenCalleeOrExecutable.set(vm, owner, callee);
+}
+
+void CallLinkInfo::clearLastSeenCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    m_lastSeenCalleeOrExecutable.clear();
+}
+
+JSFunction* CallLinkInfo::lastSeenCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    return jsCast<JSFunction*>(m_lastSeenCalleeOrExecutable.get());
+}
+
+bool CallLinkInfo::haveLastSeenCallee()
+{
+    RELEASE_ASSERT(!isDirect());
+    return !!m_lastSeenCalleeOrExecutable;
+}
+
+void CallLinkInfo::setExecutableDuringCompilation(ExecutableBase* executable)
+{
+    RELEASE_ASSERT(isDirect());
+    m_lastSeenCalleeOrExecutable.setWithoutWriteBarrier(executable);
+}
+
+ExecutableBase* CallLinkInfo::executable()
+{
+    RELEASE_ASSERT(isDirect());
+    return jsCast<ExecutableBase*>(m_lastSeenCalleeOrExecutable.get());
+}
+
+void CallLinkInfo::setMaxNumArguments(unsigned value)
+{
+    RELEASE_ASSERT(isDirect());
+    RELEASE_ASSERT(value);
+    m_maxNumArguments = value;
+}
+
+void CallLinkInfo::visitWeak(VM& vm)
+{
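+    // Drop references to callees, code blocks, and stubs whose targets are no longer marked, and record whether GC was responsible for the clearing.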
+    auto handleSpecificCallee = [&] (JSFunction* callee) {
+        if (Heap::isMarked(callee->executable()))
+            m_hasSeenClosure = true;
+        else
+            m_clearedByGC = true;
+    };
+    
+    if (isLinked()) {
+        if (stub()) {
+            if (!stub()->visitWeak(vm)) {
+                if (Options::verboseOSR()) {
+                    dataLog(
+                        "Clearing closure call to ",
+                        listDump(stub()->variants()), ", stub routine ", RawPointer(stub()),
+                        ".\n");
+                }
+                unlink(vm);
+                m_clearedByGC = true;
+            }
+        } else if (!Heap::isMarked(m_calleeOrCodeBlock.get())) {
+            if (isDirect()) {
+                if (Options::verboseOSR()) {
+                    dataLog(
+                        "Clearing call to ", RawPointer(codeBlock()), " (",
+                        pointerDump(codeBlock()), ").\n");
+                }
+            } else {
+                if (Options::verboseOSR()) {
+                    dataLog(
+                        "Clearing call to ",
+                        RawPointer(callee()), " (",
+                        callee()->executable()->hashFor(specializationKind()),
+                        ").\n");
+                }
+                handleSpecificCallee(callee());
+            }
+            unlink(vm);
+        } else if (isDirect() && !Heap::isMarked(m_lastSeenCalleeOrExecutable.get())) {
+            if (Options::verboseOSR()) {
+                dataLog(
+                    "Clearing call to ", RawPointer(executable()),
+                    " because the executable is dead.\n");
+            }
+            unlink(vm);
+            // We should only get here once the owning CodeBlock is dying, since the executable must
+            // already be in the owner's weak references.
+            m_lastSeenCalleeOrExecutable.clear();
+        }
+    }
+    if (!isDirect() && haveLastSeenCallee() && !Heap::isMarked(lastSeenCallee())) {
+        handleSpecificCallee(lastSeenCallee());
+        clearLastSeenCallee();
+    }
+}
+
+void CallLinkInfo::setFrameShuffleData(const CallFrameShuffleData& shuffleData)
+{
+    m_frameShuffleData = std::make_unique<CallFrameShuffleData>(shuffleData);
+}
+
+} // namespace JSC
+#endif // ENABLE(JIT)
+
diff --git a/bytecode/CallLinkInfo.h b/bytecode/CallLinkInfo.h
new file mode 100644
index 0000000..0a91020
--- /dev/null
+++ b/bytecode/CallLinkInfo.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallMode.h"
+#include "CodeLocation.h"
+#include "CodeSpecializationKind.h"
+#include "PolymorphicCallStubRoutine.h"
+#include "WriteBarrier.h"
+#include <wtf/SentinelLinkedList.h>
+
+namespace JSC {
+
+#if ENABLE(JIT)
+
+class FunctionCodeBlock;
+class JSFunction;
+enum OpcodeID : unsigned;
+struct CallFrameShuffleData;
+
+class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
+public:
+    enum CallType {
+        None,
+        Call,
+        CallVarargs,
+        Construct,
+        ConstructVarargs,
+        TailCall,
+        TailCallVarargs,
+        DirectCall,
+        DirectConstruct,
+        DirectTailCall
+    };
+    
+    static CallType callTypeFor(OpcodeID opcodeID);
+
+    static bool isVarargsCallType(CallType callType)
+    {
+        switch (callType) {
+        case CallVarargs:
+        case ConstructVarargs:
+        case TailCallVarargs:
+            return true;
+
+        default:
+            return false;
+        }
+    }
+
+    CallLinkInfo();
+        
+    ~CallLinkInfo();
+    
+    static CodeSpecializationKind specializationKindFor(CallType callType)
+    {
+        return specializationFromIsConstruct(callType == Construct || callType == ConstructVarargs || callType == DirectConstruct);
+    }
+    CodeSpecializationKind specializationKind() const
+    {
+        return specializationKindFor(static_cast<CallType>(m_callType));
+    }
+    
+    static CallMode callModeFor(CallType callType)
+    {
+        switch (callType) {
+        case Call:
+        case CallVarargs:
+        case DirectCall:
+            return CallMode::Regular;
+        case TailCall:
+        case TailCallVarargs:
+        case DirectTailCall:
+            return CallMode::Tail;
+        case Construct:
+        case ConstructVarargs:
+        case DirectConstruct:
+            return CallMode::Construct;
+        case None:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static bool isDirect(CallType callType)
+    {
+        switch (callType) {
+        case DirectCall:
+        case DirectTailCall:
+        case DirectConstruct:
+            return true;
+        case Call:
+        case CallVarargs:
+        case TailCall:
+        case TailCallVarargs:
+        case Construct:
+        case ConstructVarargs:
+            return false;
+        case None:
+            RELEASE_ASSERT_NOT_REACHED();
+            return false;
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+    
+    CallMode callMode() const
+    {
+        return callModeFor(static_cast<CallType>(m_callType));
+    }
+
+    bool isDirect()
+    {
+        return isDirect(static_cast<CallType>(m_callType));
+    }
+
+    bool isTailCall() const
+    {
+        return callMode() == CallMode::Tail;
+    }
+    
+    NearCallMode nearCallMode() const
+    {
+        return isTailCall() ? Tail : Regular;
+    }
+
+    bool isVarargs() const
+    {
+        return isVarargsCallType(static_cast<CallType>(m_callType));
+    }
+
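+    // A call site counts as linked once it has either a polymorphic stub or a specific callee/code block installed.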
+    bool isLinked() { return m_stub || m_calleeOrCodeBlock; }
+    void unlink(VM&);
+
+    void setUpCall(CallType callType, CodeOrigin codeOrigin, unsigned calleeGPR)
+    {
+        m_callType = callType;
+        m_codeOrigin = codeOrigin;
+        m_calleeGPR = calleeGPR;
+    }
+
+    void setCallLocations(
+        CodeLocationLabel callReturnLocationOrPatchableJump,
+        CodeLocationLabel hotPathBeginOrSlowPathStart,
+        CodeLocationNearCall hotPathOther)
+    {
+        m_callReturnLocationOrPatchableJump = callReturnLocationOrPatchableJump;
+        m_hotPathBeginOrSlowPathStart = hotPathBeginOrSlowPathStart;
+        m_hotPathOther = hotPathOther;
+    }
+
+    bool allowStubs() const { return m_allowStubs; }
+
+    void disallowStubs()
+    {
+        m_allowStubs = false;
+    }
+
+    CodeLocationNearCall callReturnLocation();
+    CodeLocationJump patchableJump();
+    CodeLocationDataLabelPtr hotPathBegin();
+    CodeLocationLabel slowPathStart();
+
+    CodeLocationNearCall hotPathOther()
+    {
+        return m_hotPathOther;
+    }
+
+    void setCallee(VM&, JSCell*, JSFunction* callee);
+    void clearCallee();
+    JSFunction* callee();
+
+    void setCodeBlock(VM&, JSCell*, FunctionCodeBlock*);
+    void clearCodeBlock();
+    FunctionCodeBlock* codeBlock();
+
+    void setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee);
+    void clearLastSeenCallee();
+    JSFunction* lastSeenCallee();
+    bool haveLastSeenCallee();
+    
+    void setExecutableDuringCompilation(ExecutableBase*);
+    ExecutableBase* executable();
+    
+    void setStub(PassRefPtr<PolymorphicCallStubRoutine> newStub)
+    {
+        clearStub();
+        m_stub = newStub;
+    }
+
+    void clearStub();
+
+    PolymorphicCallStubRoutine* stub()
+    {
+        return m_stub.get();
+    }
+
+    void setSlowStub(PassRefPtr<JITStubRoutine> newSlowStub)
+    {
+        m_slowStub = newSlowStub;
+    }
+
+    void clearSlowStub()
+    {
+        m_slowStub = nullptr;
+    }
+
+    JITStubRoutine* slowStub()
+    {
+        return m_slowStub.get();
+    }
+
+    bool seenOnce()
+    {
+        return m_hasSeenShouldRepatch;
+    }
+
+    void clearSeen()
+    {
+        m_hasSeenShouldRepatch = false;
+    }
+
+    void setSeen()
+    {
+        m_hasSeenShouldRepatch = true;
+    }
+
+    bool hasSeenClosure()
+    {
+        return m_hasSeenClosure;
+    }
+
+    void setHasSeenClosure()
+    {
+        m_hasSeenClosure = true;
+    }
+
+    bool clearedByGC()
+    {
+        return m_clearedByGC;
+    }
+
+    void setCallType(CallType callType)
+    {
+        m_callType = callType;
+    }
+
+    CallType callType()
+    {
+        return static_cast<CallType>(m_callType);
+    }
+
+    uint32_t* addressOfMaxNumArguments()
+    {
+        return &m_maxNumArguments;
+    }
+
+    uint32_t maxNumArguments()
+    {
+        return m_maxNumArguments;
+    }
+    
+    void setMaxNumArguments(unsigned);
+
+    static ptrdiff_t offsetOfSlowPathCount()
+    {
+        return OBJECT_OFFSETOF(CallLinkInfo, m_slowPathCount);
+    }
+
+    void setCalleeGPR(unsigned calleeGPR)
+    {
+        m_calleeGPR = calleeGPR;
+    }
+
+    unsigned calleeGPR()
+    {
+        return m_calleeGPR;
+    }
+
+    uint32_t slowPathCount()
+    {
+        return m_slowPathCount;
+    }
+
+    void setCodeOrigin(CodeOrigin codeOrigin)
+    {
+        m_codeOrigin = codeOrigin;
+    }
+
+    CodeOrigin codeOrigin()
+    {
+        return m_codeOrigin;
+    }
+
+    void visitWeak(VM&);
+
+    void setFrameShuffleData(const CallFrameShuffleData&);
+
+    const CallFrameShuffleData* frameShuffleData()
+    {
+        return m_frameShuffleData.get();
+    }
+
+private:
+    CodeLocationLabel m_callReturnLocationOrPatchableJump;
+    CodeLocationLabel m_hotPathBeginOrSlowPathStart;
+    CodeLocationNearCall m_hotPathOther;
+    WriteBarrier<JSCell> m_calleeOrCodeBlock;
+    WriteBarrier<JSCell> m_lastSeenCalleeOrExecutable;
+    RefPtr<PolymorphicCallStubRoutine> m_stub;
+    RefPtr<JITStubRoutine> m_slowStub;
+    std::unique_ptr<CallFrameShuffleData> m_frameShuffleData;
+    bool m_hasSeenShouldRepatch : 1;
+    bool m_hasSeenClosure : 1;
+    bool m_clearedByGC : 1;
+    bool m_allowStubs : 1;
+    bool m_isLinked : 1;
+    unsigned m_callType : 4; // CallType
+    unsigned m_calleeGPR : 8;
+    uint32_t m_maxNumArguments; // For varargs: the profiled maximum number of arguments. For direct: the number of stack slots allocated for arguments.
+    uint32_t m_slowPathCount;
+    CodeOrigin m_codeOrigin;
+};
+
+inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo)
+{
+    return callLinkInfo.codeOrigin();
+}
+
+typedef HashMap<CodeOrigin, CallLinkInfo*, CodeOriginApproximateHash> CallLinkInfoMap;
+
+#else // ENABLE(JIT)
+
+typedef HashMap CallLinkInfoMap;
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
diff --git a/bytecode/CallLinkStatus.cpp b/bytecode/CallLinkStatus.cpp
new file mode 100644
index 0000000..cbc555d
--- /dev/null
+++ b/bytecode/CallLinkStatus.cpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallLinkStatus.h"
+
+#include "CallLinkInfo.h"
+#include "CodeBlock.h"
+#include "DFGJITCode.h"
+#include "InlineCallFrame.h"
+#include "Interpreter.h"
+#include "LLIntCallLinkInfo.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+CallLinkStatus::CallLinkStatus(JSValue value)
+    : m_couldTakeSlowPath(false)
+    , m_isProved(false)
+{
+    if (!value || !value.isCell()) {
+        m_couldTakeSlowPath = true;
+        return;
+    }
+    
+    m_variants.append(CallVariant(value.asCell()));
+}
+
+CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(DFG_JIT)
+    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+        // We could force this to be a closure call, but instead we'll just assume that it
+        // takes slow path.
+        return takesSlowPath();
+    }
+#else
+    UNUSED_PARAM(locker);
+#endif
+
+    VM& vm = *profiledBlock->vm();
+    
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+    OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+    if (op != op_call && op != op_construct && op != op_tail_call)
+        return CallLinkStatus();
+    
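+    // The LLInt keeps its call link info in operand 5 of these call bytecodes; all we can recover here is the last callee it saw.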
+    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
+    
+    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
+{
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
+    
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+    ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
+    
+    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+    if (!callLinkInfo) {
+        if (exitSiteData.takesSlowPath)
+            return takesSlowPath();
+        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+    }
+    
+    return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
+#else
+    return CallLinkStatus();
+#endif
+}
+
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    ExitSiteData exitSiteData;
+    
+#if ENABLE(DFG_JIT)
+    exitSiteData.takesSlowPath =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+    exitSiteData.badFunction =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+    UNUSED_PARAM(locker);
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+#endif
+    
+    return exitSiteData;
+}
+
+#if ENABLE(JIT)
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
+{
+    // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
+    UNUSED_PARAM(profiledBlock);
+    
+    CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+    result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+    return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+    const ConcurrentJSLocker&, CallLinkInfo& callLinkInfo)
+{
+    if (callLinkInfo.clearedByGC())
+        return takesSlowPath();
+    
+    // Note that despite requiring that the locker is held, this code is racy with respect
+    // to the CallLinkInfo: it may get cleared while this code runs! This is because
+    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
+    // itself to figure out which lock to lock.
+    //
+    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+    // only be deleted at next GC, so if we load a non-null one, then it must contain data
+    // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+    // is probably OK for now.
+    
+    // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+    // until next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list is
+    // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative
+    // fencing in place to make sure that we see the variants list after construction.
+    if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
+        WTF::loadLoadFence();
+        
+        CallEdgeList edges = stub->edges();
+        
+        // Now that we've loaded the edges list, there are no further concurrency concerns. We will
+        // just manipulate and prune this list to our liking - mostly removing entries that are too
+        // infrequent and ensuring that it's sorted in descending order of frequency.
+        
+        RELEASE_ASSERT(edges.size());
+        
+        std::sort(
+            edges.begin(), edges.end(),
+            [] (CallEdge a, CallEdge b) {
+                return a.count() > b.count();
+            });
+        RELEASE_ASSERT(edges.first().count() >= edges.last().count());
+        
+        double totalCallsToKnown = 0;
+        double totalCallsToUnknown = callLinkInfo.slowPathCount();
+        CallVariantList variants;
+        for (size_t i = 0; i < edges.size(); ++i) {
+            CallEdge edge = edges[i];
+            // If the call is at the tail of the distribution, then we don't optimize it and we
+            // treat it as if it was a call to something unknown. We define the tail as being either
+            // a call that doesn't belong to the N most frequent callees (N =
+            // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
+            // small.
+            if (i >= Options::maxPolymorphicCallVariantsForInlining()
+                || edge.count() < Options::frequentCallThreshold())
+                totalCallsToUnknown += edge.count();
+            else {
+                totalCallsToKnown += edge.count();
+                variants.append(edge.callee());
+            }
+        }
+        
+        // Bail if we didn't find any calls that qualified.
+        RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
+        if (variants.isEmpty())
+            return takesSlowPath();
+        
+        // We require that the distribution of callees is skewed towards a handful of common ones.
+        if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
+            return takesSlowPath();
+        
+        RELEASE_ASSERT(totalCallsToKnown);
+        RELEASE_ASSERT(variants.size());
+        
+        CallLinkStatus result;
+        result.m_variants = variants;
+        result.m_couldTakeSlowPath = !!totalCallsToUnknown;
+        result.m_isBasedOnStub = true;
+        return result;
+    }
+    
+    CallLinkStatus result;
+    
+    if (JSFunction* target = callLinkInfo.lastSeenCallee()) {
+        CallVariant variant(target);
+        if (callLinkInfo.hasSeenClosure())
+            variant = variant.despecifiedClosure();
+        result.m_variants.append(variant);
+    }
+    
+    result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();
+
+    return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
+    ExitSiteData exitSiteData)
+{
+    CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+    if (exitSiteData.badFunction) {
+        if (result.isBasedOnStub()) {
+            // If we have a polymorphic stub, then having an exit site is not quite so useful. In
+            // most cases, the information in the stub has higher fidelity.
+            result.makeClosureCall();
+        } else {
+            // We might not have a polymorphic stub for any number of reasons. When this happens, we
+            // are in less certain territory, so exit sites mean a lot.
+            result.m_couldTakeSlowPath = true;
+        }
+    }
+    if (exitSiteData.takesSlowPath)
+        result.m_couldTakeSlowPath = true;
+    
+    return result;
+}
+#endif
+
+void CallLinkStatus::computeDFGStatuses(
+    CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
+{
+#if ENABLE(DFG_JIT)
+    RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
+    CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
+    for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
+        CallLinkInfo& info = **iter;
+        if (info.isDirect()) {
+            // If the DFG was able to get a direct call then probably so will we. However, there is
+            // a remote chance that it's bad news to lose information about what the DFG did. We'd
+            // ideally like to just know that the DFG had emitted a DirectCall.
+            continue;
+        }
+        CodeOrigin codeOrigin = info.codeOrigin();
+        
+        // Check if we had already previously made a terrible mistake in the FTL for this
+        // code origin. Note that this is approximate because we could have a monovariant
+        // inline in the FTL that ended up failing. We should fix that at some point by
+        // having data structures to track the context of frequent exits. This is currently
+        // challenging because it would require creating a CodeOrigin-based database in
+        // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
+        // InlineCallFrames.
+        CodeBlock* currentBaseline =
+            baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+        ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(currentBaseline->m_lock);
+            exitSiteData = computeExitSiteData(
+                locker, currentBaseline, codeOrigin.bytecodeIndex);
+        }
+        
+        {
+            ConcurrentJSLocker locker(dfgCodeBlock->m_lock);
+            map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData));
+        }
+    }
+#else
+    UNUSED_PARAM(dfgCodeBlock);
+#endif // ENABLE(DFG_JIT)
+    
+    if (verbose) {
+        dataLog("Context map:\n");
+        ContextMap::iterator iter = map.begin();
+        ContextMap::iterator end = map.end();
+        for (; iter != end; ++iter) {
+            dataLog("    ", iter->key, ":\n");
+            dataLog("        ", iter->value, "\n");
+        }
+    }
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, CodeOrigin codeOrigin,
+    const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap)
+{
+    auto iter = dfgMap.find(codeOrigin);
+    if (iter != dfgMap.end())
+        return iter->value;
+    
+    return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
+}
+
+void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
+{
+    m_variants = CallVariantList{ variant };
+    m_couldTakeSlowPath = false;
+    m_isProved = true;
+}
+
+bool CallLinkStatus::isClosureCall() const
+{
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].isClosureCall())
+            return true;
+    }
+    return false;
+}
+
+void CallLinkStatus::makeClosureCall()
+{
+    m_variants = despecifiedVariantList(m_variants);
+}
+
+void CallLinkStatus::dump(PrintStream& out) const
+{
+    if (!isSet()) {
+        out.print("Not Set");
+        return;
+    }
+    
+    CommaPrinter comma;
+    
+    if (m_isProved)
+        out.print(comma, "Statically Proved");
+    
+    if (m_couldTakeSlowPath)
+        out.print(comma, "Could Take Slow Path");
+    
+    if (m_isBasedOnStub)
+        out.print(comma, "Based On Stub");
+    
+    if (!m_variants.isEmpty())
+        out.print(comma, listDump(m_variants));
+    
+    if (m_maxNumArguments)
+        out.print(comma, "maxNumArguments = ", m_maxNumArguments);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/CallLinkStatus.h b/bytecode/CallLinkStatus.h
new file mode 100644
index 0000000..353deaa
--- /dev/null
+++ b/bytecode/CallLinkStatus.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallLinkInfo.h"
+#include "CallVariant.h"
+#include "CodeOrigin.h"
+#include "CodeSpecializationKind.h"
+#include "ConcurrentJSLock.h"
+#include "ExitingJITType.h"
+#include "Intrinsic.h"
+#include "JSCJSValue.h"
+
+namespace JSC {
+
+class CodeBlock;
+class InternalFunction;
+class JSFunction;
+class Structure;
+class CallLinkInfo;
+
+class CallLinkStatus {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    CallLinkStatus()
+    {
+    }
+    
+    static CallLinkStatus takesSlowPath()
+    {
+        CallLinkStatus result;
+        result.m_couldTakeSlowPath = true;
+        return result;
+    }
+    
+    explicit CallLinkStatus(JSValue);
+    
+    CallLinkStatus(CallVariant variant)
+        : m_variants(1, variant)
+    {
+    }
+    
+    static CallLinkStatus computeFor(
+        CodeBlock*, unsigned bytecodeIndex, const CallLinkInfoMap&);
+
+    struct ExitSiteData {
+        bool takesSlowPath { false };
+        bool badFunction { false };
+    };
+    static ExitSiteData computeExitSiteData(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+    
+#if ENABLE(JIT)
+    // Computes the status assuming that we never took slow path and never previously
+    // exited.
+    static CallLinkStatus computeFor(const ConcurrentJSLocker&, CodeBlock*, CallLinkInfo&);
+    static CallLinkStatus computeFor(
+        const ConcurrentJSLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData);
+#endif
+    
+    typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap;
+    
+    // Computes all of the statuses of the DFG code block. Doesn't include statuses that had
+    // no information. Currently we use this when compiling FTL code, to enable polyvariant
+    // inlining.
+    static void computeDFGStatuses(CodeBlock* dfgCodeBlock, ContextMap&);
+    
+    // Helper that first consults the ContextMap and then does computeFor().
+    static CallLinkStatus computeFor(
+        CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&);
+    
+    void setProvenConstantCallee(CallVariant);
+    
+    bool isSet() const { return !m_variants.isEmpty() || m_couldTakeSlowPath; }
+    
+    bool operator!() const { return !isSet(); }
+    
+    bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
+    
+    void setCouldTakeSlowPath(bool value) { m_couldTakeSlowPath = value; }
+    
+    CallVariantList variants() const { return m_variants; }
+    unsigned size() const { return m_variants.size(); }
+    CallVariant at(unsigned i) const { return m_variants[i]; }
+    CallVariant operator[](unsigned i) const { return at(i); }
+    bool isProved() const { return m_isProved; }
+    bool isBasedOnStub() const { return m_isBasedOnStub; }
+    bool canOptimize() const { return !m_variants.isEmpty(); }
+
+    bool isClosureCall() const; // Returns true if any callee is a closure call.
+    
+    unsigned maxNumArguments() const { return m_maxNumArguments; }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    void makeClosureCall();
+    
+    static CallLinkStatus computeFromLLInt(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#if ENABLE(JIT)
+    static CallLinkStatus computeFromCallLinkInfo(
+        const ConcurrentJSLocker&, CallLinkInfo&);
+#endif
+    
+    CallVariantList m_variants;
+    bool m_couldTakeSlowPath { false };
+    bool m_isProved { false };
+    bool m_isBasedOnStub { false };
+    unsigned m_maxNumArguments { 0 };
+};
+
+} // namespace JSC
diff --git a/bytecode/CallMode.cpp b/bytecode/CallMode.cpp
new file mode 100644
index 0000000..5757b18
--- /dev/null
+++ b/bytecode/CallMode.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CallMode callMode)
+{
+    switch (callMode) {
+    case JSC::CallMode::Tail:
+        out.print("TailCall");
+        return;
+    case JSC::CallMode::Regular:
+        out.print("Call");
+        return;
+    case JSC::CallMode::Construct:
+        out.print("Construct");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/bytecode/CallMode.h b/bytecode/CallMode.h
new file mode 100644
index 0000000..02d90e1
--- /dev/null
+++ b/bytecode/CallMode.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeSpecializationKind.h"
+
+namespace JSC {
+
+enum class CallMode { Regular, Tail, Construct };
+
+enum FrameAction { KeepTheFrame = 0, ReuseTheFrame };
+
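+// Regular and tail calls both run the callee's call-specialized code; only construct calls select
+// the construct specialization.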
+inline CodeSpecializationKind specializationKindFor(CallMode callMode)
+{
+    if (callMode == CallMode::Construct)
+        return CodeForConstruct;
+
+    return CodeForCall;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::CallMode);
+
+} // namespace WTF
diff --git a/bytecode/CallReturnOffsetToBytecodeOffset.h b/bytecode/CallReturnOffsetToBytecodeOffset.h
new file mode 100644
index 0000000..2d1b00c
--- /dev/null
+++ b/bytecode/CallReturnOffsetToBytecodeOffset.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+#if ENABLE(JIT)
+// This structure is used to map from a call return location
+// (given as an offset in bytes into the JIT code) back to
+// the bytecode index of the corresponding bytecode operation.
+// This is then used to look up the corresponding handler.
+// FIXME: This should be made inlining aware! Currently it isn't
+// because we never inline code that has exception handlers.
+struct CallReturnOffsetToBytecodeOffset {
+    CallReturnOffsetToBytecodeOffset(unsigned callReturnOffset, unsigned bytecodeOffset)
+        : callReturnOffset(callReturnOffset)
+        , bytecodeOffset(bytecodeOffset)
+    {
+    }
+
+    unsigned callReturnOffset;
+    unsigned bytecodeOffset;
+};
+
+inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeOffset* pc)
+{
+    return pc->callReturnOffset;
+}
+#endif
+
+} // namespace JSC
diff --git a/bytecode/CallVariant.cpp b/bytecode/CallVariant.cpp
new file mode 100644
index 0000000..9745dde
--- /dev/null
+++ b/bytecode/CallVariant.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CallVariant.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+void CallVariant::dump(PrintStream& out) const
+{
+    if (!*this) {
+        out.print("null");
+        return;
+    }
+    
+    if (InternalFunction* internalFunction = this->internalFunction()) {
+        out.print("InternalFunction: ", JSValue(internalFunction));
+        return;
+    }
+    
+    if (JSFunction* function = this->function()) {
+        out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")");
+        return;
+    }
+    
+    out.print("Executable: ", *executable());
+}
+
+CallVariantList variantListWithVariant(const CallVariantList& list, CallVariant variantToAdd)
+{
+    ASSERT(variantToAdd);
+    CallVariantList result;
+    for (CallVariant variant : list) {
+        ASSERT(variant);
+        if (!!variantToAdd) {
+            if (variant == variantToAdd)
+                variantToAdd = CallVariant();
+            else if (variant.despecifiedClosure() == variantToAdd.despecifiedClosure()) {
+                variant = variant.despecifiedClosure();
+                variantToAdd = CallVariant();
+            }
+        }
+        result.append(variant);
+    }
+    if (!!variantToAdd)
+        result.append(variantToAdd);
+    
+    if (!ASSERT_DISABLED) {
+        for (unsigned i = 0; i < result.size(); ++i) {
+            for (unsigned j = i + 1; j < result.size(); ++j) {
+                if (result[i] != result[j])
+                    continue;
+                
+                dataLog("variantListWithVariant(", listDump(list), ", ", variantToAdd, ") failed: got duplicates in result: ", listDump(result), "\n");
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+    }
+    
+    return result;
+}
+
+CallVariantList despecifiedVariantList(const CallVariantList& list)
+{
+    CallVariantList result;
+    for (CallVariant variant : list)
+        result = variantListWithVariant(result, variant.despecifiedClosure());
+    return result;
+}
+
+} // namespace JSC
+
diff --git a/bytecode/CallVariant.h b/bytecode/CallVariant.h
new file mode 100644
index 0000000..cb288c6
--- /dev/null
+++ b/bytecode/CallVariant.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "FunctionExecutable.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "NativeExecutable.h"
+
+namespace JSC {
+
+// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking
+// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal
+// non-function objects that nevertheless provide call traps, the call machinery wants to see a
+// callee in one of the following four forms:
+//
+// JSFunction callee: This means that we expect the callsite to always call a particular function
+//     instance, that is associated with a particular lexical environment. This pinpoints not
+//     just the code that will be called (i.e. the executable) but also the scope within which
+//     the code runs.
+//
+// Executable callee: This corresponds to a call to a closure. In this case, we know that the
+//     callsite will call a JSFunction, but we do not know which particular JSFunction. We do know
+//     what code will be called - i.e. we know the executable.
+//
+// InternalFunction callee: JSC supports a special kind of native function that supports bizarre
+//     semantics. These are always singletons. If we know that the callee is an InternalFunction
+//     then we know both the code that will be called and the scope; in fact the "scope" is really
+//     just the InternalFunction itself.
+//
+// Something else: It's possible to call all manner of rubbish in JavaScript. This implicitly supports
+//     bizarre object callees, but it can't really tell you anything interesting about them other
+//     than the fact that they don't fall into any of the above categories.
+//
+// This class serves as a kind of union over these four things. It does so by just holding a
+// JSCell*. We determine which of the modes it's in by doing type checks on the cell. Note that we
+// cannot use WriteBarrier<> here because this gets used inside the compiler.
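+//
+// For instance, the same call site might be summarized by any of these variants over time (purely
+// illustrative; the names below are placeholders):
+//
+//     CallVariant(someJSFunction)                // JSFunction callee: code and scope pinned down.
+//     CallVariant(someJSFunction->executable())  // Executable callee: code known, scope unknown.
+//     CallVariant(someInternalFunction)          // InternalFunction callee: singleton native callee.
+//     CallVariant(someOtherCallableCell)         // Something else: no useful information.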
+
+class CallVariant {
+public:
+    explicit CallVariant(JSCell* callee = nullptr)
+        : m_callee(callee)
+    {
+    }
+    
+    CallVariant(WTF::HashTableDeletedValueType)
+        : m_callee(deletedToken())
+    {
+    }
+    
+    bool operator!() const { return !m_callee; }
+    
+    // If this variant refers to a function, change it to refer to its executable.
+    ALWAYS_INLINE CallVariant despecifiedClosure() const
+    {
+        if (m_callee->type() == JSFunctionType)
+            return CallVariant(jsCast<JSFunction*>(m_callee)->executable());
+        return *this;
+    }
+    
+    JSCell* rawCalleeCell() const { return m_callee; }
+    
+    InternalFunction* internalFunction() const
+    {
+        return jsDynamicCast<InternalFunction*>(m_callee);
+    }
+    
+    JSFunction* function() const
+    {
+        return jsDynamicCast<JSFunction*>(m_callee);
+    }
+    
+    bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(m_callee); }
+    
+    ExecutableBase* executable() const
+    {
+        if (JSFunction* function = this->function())
+            return function->executable();
+        return jsDynamicCast<ExecutableBase*>(m_callee);
+    }
+    
+    JSCell* nonExecutableCallee() const
+    {
+        RELEASE_ASSERT(!isClosureCall());
+        return m_callee;
+    }
+    
+    Intrinsic intrinsicFor(CodeSpecializationKind kind) const
+    {
+        if (ExecutableBase* executable = this->executable())
+            return executable->intrinsicFor(kind);
+        return NoIntrinsic;
+    }
+    
+    FunctionExecutable* functionExecutable() const
+    {
+        if (ExecutableBase* executable = this->executable())
+            return jsDynamicCast<FunctionExecutable*>(executable);
+        return nullptr;
+    }
+
+    NativeExecutable* nativeExecutable() const
+    {
+        if (ExecutableBase* executable = this->executable())
+            return jsDynamicCast<NativeExecutable*>(executable);
+        return nullptr;
+    }
+
+    const DOMJIT::Signature* signatureFor(CodeSpecializationKind kind) const
+    {
+        if (NativeExecutable* nativeExecutable = this->nativeExecutable())
+            return nativeExecutable->signatureFor(kind);
+        return nullptr;
+    }
+    
+    void dump(PrintStream& out) const;
+    
+    bool isHashTableDeletedValue() const
+    {
+        return m_callee == deletedToken();
+    }
+    
+    bool operator==(const CallVariant& other) const
+    {
+        return m_callee == other.m_callee;
+    }
+    
+    bool operator!=(const CallVariant& other) const
+    {
+        return !(*this == other);
+    }
+    
+    bool operator<(const CallVariant& other) const
+    {
+        return m_callee < other.m_callee;
+    }
+    
+    bool operator>(const CallVariant& other) const
+    {
+        return other < *this;
+    }
+    
+    bool operator<=(const CallVariant& other) const
+    {
+        return !(other < *this);
+    }
+    
+    bool operator>=(const CallVariant& other) const
+    {
+        return other <= *this;
+    }
+    
+    unsigned hash() const
+    {
+        return WTF::PtrHash<JSCell*>::hash(m_callee);
+    }
+    
+private:
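+    // The deleted-value token is a non-null sentinel; no real JSCell can live at address 1 because
+    // cells are always at least pointer-aligned.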
+    static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); }
+    
+    JSCell* m_callee;
+};
+
+struct CallVariantHash {
+    static unsigned hash(const CallVariant& key) { return key.hash(); }
+    static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+typedef Vector<CallVariant> CallVariantList;
+
+// Returns a new variant list by attempting to either append the given variant or merge it with one
+// of the variants we already have by despecifying closures.
+CallVariantList variantListWithVariant(const CallVariantList&, CallVariant);
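+// For example (illustrative): if the list already holds CallVariant(f) for some JSFunction f, then
+// adding CallVariant(g), where g shares f's FunctionExecutable, collapses the two into a single
+// despecified CallVariant over that executable instead of growing the list.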
+
+// Returns a new list where every element is despecified, and the list is deduplicated.
+CallVariantList despecifiedVariantList(const CallVariantList&);
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::CallVariant> {
+    typedef JSC::CallVariantHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { };
+
+} // namespace WTF
diff --git a/bytecode/CodeBlock.cpp b/bytecode/CodeBlock.cpp
new file mode 100644
index 0000000..d2566a7
--- /dev/null
+++ b/bytecode/CodeBlock.cpp
@@ -0,0 +1,4573 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CodeBlock.h"
+
+#include "ArithProfile.h"
+#include "BasicBlockLocation.h"
+#include "BytecodeGenerator.h"
+#include "BytecodeLivenessAnalysis.h"
+#include "BytecodeUseDef.h"
+#include "CallLinkStatus.h"
+#include "CodeBlockSet.h"
+#include "DFGCapabilities.h"
+#include "DFGCommon.h"
+#include "DFGDriver.h"
+#include "DFGJITCode.h"
+#include "DFGWorklist.h"
+#include "Debugger.h"
+#include "EvalCodeBlock.h"
+#include "FunctionCodeBlock.h"
+#include "FunctionExecutableDump.h"
+#include "GetPutInfo.h"
+#include "InlineCallFrame.h"
+#include "Interpreter.h"
+#include "JIT.h"
+#include "JITMathIC.h"
+#include "JSCInlines.h"
+#include "JSCJSValue.h"
+#include "JSFunction.h"
+#include "JSLexicalEnvironment.h"
+#include "JSModuleEnvironment.h"
+#include "LLIntData.h"
+#include "LLIntEntrypoint.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
+#include "LowLevelInterpreter.h"
+#include "ModuleProgramCodeBlock.h"
+#include "PCToCodeOriginMap.h"
+#include "PolymorphicAccess.h"
+#include "ProfilerDatabase.h"
+#include "ProgramCodeBlock.h"
+#include "ReduceWhitespace.h"
+#include "Repatch.h"
+#include "SlotVisitorInlines.h"
+#include "StackVisitor.h"
+#include "StructureStubInfo.h"
+#include "TypeLocationCache.h"
+#include "TypeProfiler.h"
+#include "UnlinkedInstructionStream.h"
+#include "VMInlines.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#if ENABLE(JIT)
+#include "RegisterAtOffsetList.h"
+#endif
+
+#if ENABLE(DFG_JIT)
+#include "DFGOperations.h"
+#endif
+
+#if ENABLE(FTL_JIT)
+#include "FTLJITCode.h"
+#endif
+
+namespace JSC {
+
+const ClassInfo CodeBlock::s_info = {
+    "CodeBlock", 0, 0,
+    CREATE_METHOD_TABLE(CodeBlock)
+};
+
+CString CodeBlock::inferredName() const
+{
+    switch (codeType()) {
+    case GlobalCode:
+        return "<global>";
+    case EvalCode:
+        return "<eval>";
+    case FunctionCode:
+        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
+    case ModuleCode:
+        return "<module>";
+    default:
+        CRASH();
+        return CString("", 0);
+    }
+}
+
+bool CodeBlock::hasHash() const
+{
+    return !!m_hash;
+}
+
+bool CodeBlock::isSafeToComputeHash() const
+{
+    return !isCompilationThread();
+}
+
+CodeBlockHash CodeBlock::hash() const
+{
+    if (!m_hash) {
+        RELEASE_ASSERT(isSafeToComputeHash());
+        m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
+    }
+    return m_hash;
+}
+
+CString CodeBlock::sourceCodeForTools() const
+{
+    if (codeType() != FunctionCode)
+        return ownerScriptExecutable()->source().toUTF8();
+    
+    SourceProvider* provider = source();
+    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
+    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
+    unsigned unlinkedStartOffset = unlinked->startOffset();
+    unsigned linkedStartOffset = executable->source().startOffset();
+    int delta = linkedStartOffset - unlinkedStartOffset;
+    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
+    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
+    return toCString(
+        "function ",
+        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
+}
+
+CString CodeBlock::sourceCodeOnOneLine() const
+{
+    return reduceWhitespace(sourceCodeForTools());
+}
+
+CString CodeBlock::hashAsStringIfPossible() const
+{
+    if (hasHash() || isSafeToComputeHash())
+        return toCString(hash());
+    return "";
+}
+
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+{
+    out.print(inferredName(), "#", hashAsStringIfPossible());
+    out.print(":[", RawPointer(this), "->");
+    if (!!m_alternative)
+        out.print(RawPointer(alternative()), "->");
+    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
+
+    if (codeType() == FunctionCode)
+        out.print(specializationKind());
+    out.print(", ", instructionCount());
+    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
+        out.print(" (ShouldAlwaysBeInlined)");
+    if (ownerScriptExecutable()->neverInline())
+        out.print(" (NeverInline)");
+    if (ownerScriptExecutable()->neverOptimize())
+        out.print(" (NeverOptimize)");
+    else if (ownerScriptExecutable()->neverFTLOptimize())
+        out.print(" (NeverFTLOptimize)");
+    if (ownerScriptExecutable()->didTryToEnterInLoop())
+        out.print(" (DidTryToEnterInLoop)");
+    if (ownerScriptExecutable()->isStrictMode())
+        out.print(" (StrictMode)");
+    if (m_didFailJITCompilation)
+        out.print(" (JITFail)");
+    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+        out.print(" (FTLFail)");
+    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+        out.print(" (HadFTLReplacement)");
+    out.print("]");
+}
+
+void CodeBlock::dump(PrintStream& out) const
+{
+    dumpAssumingJITType(out, jitType());
+}
+
+static CString idName(int id0, const Identifier& ident)
+{
+    return toCString(ident.impl(), "(@id", id0, ")");
+}
+
+CString CodeBlock::registerName(int r) const
+{
+    if (isConstantRegisterIndex(r))
+        return constantName(r);
+
+    return toCString(VirtualRegister(r));
+}
+
+CString CodeBlock::constantName(int index) const
+{
+    JSValue value = getConstant(index);
+    return toCString(value, "(", VirtualRegister(index), ")");
+}
+
+static CString regexpToSourceString(RegExp* regExp)
+{
+    char postfix[7] = { '/', 0, 0, 0, 0, 0, 0 }; // Room for '/', up to five flags, and a null terminator.
+    int index = 1;
+    if (regExp->global())
+        postfix[index++] = 'g';
+    if (regExp->ignoreCase())
+        postfix[index++] = 'i';
+    if (regExp->multiline())
+        postfix[index++] = 'm';
+    if (regExp->sticky())
+        postfix[index++] = 'y';
+    if (regExp->unicode())
+        postfix[index++] = 'u';
+
+    return toCString("/", regExp->pattern().impl(), postfix);
+}
+
+static CString regexpName(int re, RegExp* regexp)
+{
+    return toCString(regexpToSourceString(regexp), "(@re", re, ")");
+}
+
+NEVER_INLINE static const char* debugHookName(int debugHookType)
+{
+    switch (static_cast<DebugHookType>(debugHookType)) {
+        case DidEnterCallFrame:
+            return "didEnterCallFrame";
+        case WillLeaveCallFrame:
+            return "willLeaveCallFrame";
+        case WillExecuteStatement:
+            return "willExecuteStatement";
+        case WillExecuteExpression:
+            return "willExecuteExpression";
+        case WillExecuteProgram:
+            return "willExecuteProgram";
+        case DidExecuteProgram:
+            return "didExecuteProgram";
+        case DidReachBreakpoint:
+            return "didReachBreakpoint";
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return "";
+}
+
+void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
+{
+    int r0 = (++it)->u.operand;
+    int r1 = (++it)->u.operand;
+
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+}
+
+void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
+{
+    int r0 = (++it)->u.operand;
+    int r1 = (++it)->u.operand;
+    int r2 = (++it)->u.operand;
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+}
+
+void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
+{
+    int r0 = (++it)->u.operand;
+    int offset = (++it)->u.operand;
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
+}
+
+void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
+{
+    const char* op;
+    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
+    case op_get_by_id:
+        op = "get_by_id";
+        break;
+    case op_get_by_id_proto_load:
+        op = "get_by_id_proto_load";
+        break;
+    case op_get_by_id_unset:
+        op = "get_by_id_unset";
+        break;
+    case op_get_array_length:
+        op = "array_length";
+        break;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
+        op = 0;
+#endif
+    }
+    int r0 = (++it)->u.operand;
+    int r1 = (++it)->u.operand;
+    int id0 = (++it)->u.operand;
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
+    it += 4; // Increment up to the value profiler.
+}
+
+static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
+{
+    if (!structure)
+        return;
+    
+    out.printf("%s = %p", name, structure);
+    
+    PropertyOffset offset = structure->getConcurrently(ident.impl());
+    if (offset != invalidOffset)
+        out.printf(" (offset = %d)", offset);
+}
+
+static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
+{
+    out.printf("chain = %p: [", chain);
+    bool first = true;
+    for (WriteBarrier<Structure>* currentStructure = chain->head();
+         *currentStructure;
+         ++currentStructure) {
+        if (first)
+            first = false;
+        else
+            out.printf(", ");
+        dumpStructure(out, "struct", currentStructure->get(), ident);
+    }
+    out.printf("]");
+}
+
+void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
+{
+    Instruction* instruction = instructions().begin() + location;
+
+    const Identifier& ident = identifier(instruction[3].u.operand);
+    
+    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
+    
+    if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
+        out.printf(" llint(array_length)");
+    else if (StructureID structureID = instruction[4].u.structureID) {
+        Structure* structure = m_vm->heap.structureIDTable().get(structureID);
+        out.printf(" llint(");
+        dumpStructure(out, "struct", structure, ident);
+        out.printf(")");
+        if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_by_id_proto_load)
+            out.printf(" proto(%p)", instruction[6].u.pointer);
+    }
+
+#if ENABLE(JIT)
+    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
+        StructureStubInfo& stubInfo = *stubPtr;
+        if (stubInfo.resetByGC)
+            out.print(" (Reset By GC)");
+        
+        out.printf(" jit(");
+            
+        Structure* baseStructure = nullptr;
+        PolymorphicAccess* stub = nullptr;
+            
+        switch (stubInfo.cacheType) {
+        case CacheType::GetByIdSelf:
+            out.printf("self");
+            baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
+            break;
+        case CacheType::Stub:
+            out.printf("stub");
+            stub = stubInfo.u.stub;
+            break;
+        case CacheType::Unset:
+            out.printf("unset");
+            break;
+        case CacheType::ArrayLength:
+            out.printf("ArrayLength");
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+            
+        if (baseStructure) {
+            out.printf(", ");
+            dumpStructure(out, "struct", baseStructure, ident);
+        }
+
+        if (stub)
+            out.print(", ", *stub);
+
+        out.printf(")");
+    }
+#else
+    UNUSED_PARAM(map);
+#endif
+}
+
+void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
+{
+    Instruction* instruction = instructions().begin() + location;
+
+    const Identifier& ident = identifier(instruction[2].u.operand);
+    
+    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
+
+    out.print(", ", instruction[8].u.putByIdFlags);
+    
+    if (StructureID structureID = instruction[4].u.structureID) {
+        Structure* structure = m_vm->heap.structureIDTable().get(structureID);
+        out.print(" llint(");
+        if (StructureID newStructureID = instruction[6].u.structureID) {
+            Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
+            dumpStructure(out, "prev", structure, ident);
+            out.print(", ");
+            dumpStructure(out, "next", newStructure, ident);
+            if (StructureChain* chain = instruction[7].u.structureChain.get()) {
+                out.print(", ");
+                dumpChain(out, chain, ident);
+            }
+        } else
+            dumpStructure(out, "struct", structure, ident);
+        out.print(")");
+    }
+
+#if ENABLE(JIT)
+    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
+        StructureStubInfo& stubInfo = *stubPtr;
+        if (stubInfo.resetByGC)
+            out.print(" (Reset By GC)");
+        
+        out.printf(" jit(");
+        
+        switch (stubInfo.cacheType) {
+        case CacheType::PutByIdReplace:
+            out.print("replace, ");
+            dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
+            break;
+        case CacheType::Stub: {
+            out.print("stub, ", *stubInfo.u.stub);
+            break;
+        }
+        case CacheType::Unset:
+            out.printf("unset");
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+        out.printf(")");
+    }
+#else
+    UNUSED_PARAM(map);
+#endif
+}
+
+void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
+{
+    int dst = (++it)->u.operand;
+    int func = (++it)->u.operand;
+    int argCount = (++it)->u.operand;
+    int registerOffset = (++it)->u.operand;
+    printLocationAndOp(out, exec, location, it, op);
+    out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset);
+    out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")");
+    if (cacheDumpMode == DumpCaches) {
+        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
+        if (callLinkInfo->lastSeenCallee) {
+            out.printf(
+                " llint(%p, exec %p)",
+                callLinkInfo->lastSeenCallee.get(),
+                callLinkInfo->lastSeenCallee->executable());
+        }
+#if ENABLE(JIT)
+        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
+            JSFunction* target = info->lastSeenCallee();
+            if (target)
+                out.printf(" jit(%p, exec %p)", target, target->executable());
+        }
+        
+        if (jitType() != JITCode::FTLJIT)
+            out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
+#else
+        UNUSED_PARAM(map);
+#endif
+    }
+    ++it;
+    ++it;
+    dumpArrayProfiling(out, it, hasPrintedProfiling);
+    dumpValueProfiling(out, it, hasPrintedProfiling);
+}
+
+void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
+{
+    int r0 = (++it)->u.operand;
+    int id0 = (++it)->u.operand;
+    int r1 = (++it)->u.operand;
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
+    it += 5;
+}
+
+void CodeBlock::dumpSource()
+{
+    dumpSource(WTF::dataFile());
+}
+
+void CodeBlock::dumpSource(PrintStream& out)
+{
+    ScriptExecutable* executable = ownerScriptExecutable();
+    if (executable->isFunctionExecutable()) {
+        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
+        StringView source = functionExecutable->source().provider()->getRange(
+            functionExecutable->parametersStartOffset(),
+            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
+        
+        out.print("function ", inferredName(), source);
+        return;
+    }
+    out.print(executable->source().view());
+}
+
+void CodeBlock::dumpBytecode()
+{
+    dumpBytecode(WTF::dataFile());
+}
+
+void CodeBlock::dumpBytecode(PrintStream& out)
+{
+    // We only use the ExecState* for things that don't actually lead to JS execution,
+    // like converting a JSString to a String. Hence the globalExec is appropriate.
+    ExecState* exec = m_globalObject->globalExec();
+    
+    size_t instructionCount = 0;
+
+    for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
+        ++instructionCount;
+
+    out.print(*this);
+    out.printf(
+        ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
+        static_cast<unsigned long>(instructions().size()),
+        static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
+        m_numParameters, m_numCalleeLocals, m_numVars);
+    out.print("; scope at ", scopeRegister());
+    out.printf("\n");
+    
+    StubInfoMap stubInfos;
+    CallLinkInfoMap callLinkInfos;
+    getStubInfoMap(stubInfos);
+    getCallLinkInfoMap(callLinkInfos);
+    
+    const Instruction* begin = instructions().begin();
+    const Instruction* end = instructions().end();
+    for (const Instruction* it = begin; it != end; ++it)
+        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
+    
+    if (numberOfIdentifiers()) {
+        out.printf("\nIdentifiers:\n");
+        size_t i = 0;
+        do {
+            out.printf("  id%u = %s\n", static_cast(i), identifier(i).string().utf8().data());
+            ++i;
+        } while (i != numberOfIdentifiers());
+    }
+
+    if (!m_constantRegisters.isEmpty()) {
+        out.printf("\nConstants:\n");
+        size_t i = 0;
+        do {
+            const char* sourceCodeRepresentationDescription = nullptr;
+            switch (m_constantsSourceCodeRepresentation[i]) {
+            case SourceCodeRepresentation::Double:
+                sourceCodeRepresentationDescription = ": in source as double";
+                break;
+            case SourceCodeRepresentation::Integer:
+                sourceCodeRepresentationDescription = ": in source as integer";
+                break;
+            case SourceCodeRepresentation::Other:
+                sourceCodeRepresentationDescription = "";
+                break;
+            }
+            out.printf("   k%u = %s%s\n", static_cast(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
+            ++i;
+        } while (i < m_constantRegisters.size());
+    }
+
+    if (size_t count = m_unlinkedCode->numberOfRegExps()) {
+        out.printf("\nm_regexps:\n");
+        size_t i = 0;
+        do {
+            out.printf("  re%u = %s\n", static_cast(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
+            ++i;
+        } while (i < count);
+    }
+
+    dumpExceptionHandlers(out);
+    
+    if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
+        out.printf("Switch Jump Tables:\n");
+        unsigned i = 0;
+        do {
+            out.printf("  %1d = {\n", i);
+            int entry = 0;
+            Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
+            for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
+                if (!*iter)
+                    continue;
+                out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
+            }
+            out.printf("      }\n");
+            ++i;
+        } while (i < m_rareData->m_switchJumpTables.size());
+    }
+    
+    if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
+        out.printf("\nString Switch Jump Tables:\n");
+        unsigned i = 0;
+        do {
+            out.printf("  %1d = {\n", i);
+            StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
+            for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
+                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
+            out.printf("      }\n");
+            ++i;
+        } while (i < m_rareData->m_stringSwitchJumpTables.size());
+    }
+
+    out.printf("\n");
+}
+
+void CodeBlock::dumpExceptionHandlers(PrintStream& out)
+{
+    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
+        out.printf("\nException Handlers:\n");
+        unsigned i = 0;
+        do {
+            HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
+                i + 1, handler.start, handler.end, handler.target, handler.typeName());
+            ++i;
+        } while (i < m_rareData->m_exceptionHandlers.size());
+    }
+}
+
+void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
+{
+    if (hasPrintedProfiling) {
+        out.print("; ");
+        return;
+    }
+    
+    out.print("    ");
+    hasPrintedProfiling = true;
+}
+
+void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
+{
+    ConcurrentJSLocker locker(m_lock);
+    
+    ++it;
+    CString description = it->u.profile->briefDescription(locker);
+    if (!description.length())
+        return;
+    beginDumpProfiling(out, hasPrintedProfiling);
+    out.print(description);
+}
+
+void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
+{
+    ConcurrentJSLocker locker(m_lock);
+    
+    ++it;
+    if (!it->u.arrayProfile)
+        return;
+    CString description = it->u.arrayProfile->briefDescription(locker, this);
+    if (!description.length())
+        return;
+    beginDumpProfiling(out, hasPrintedProfiling);
+    out.print(description);
+}
+
+void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
+{
+    if (!profile || !profile->m_counter)
+        return;
+
+    beginDumpProfiling(out, hasPrintedProfiling);
+    out.print(name, profile->m_counter);
+}
+
+void CodeBlock::dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling)
+{
+    if (!profile)
+        return;
+    
+    beginDumpProfiling(out, hasPrintedProfiling);
+    out.print("results: ", *profile);
+}
+
+void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
+{
+    out.printf("[%4d] %-17s ", location, op);
+}
+
+void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
+{
+    printLocationAndOp(out, exec, location, it, op);
+    out.printf("%s", registerName(operand).data());
+}
+
+void CodeBlock::dumpBytecode(
+    PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
+    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
+{
+    int location = it - begin;
+    bool hasPrintedProfiling = false;
+    OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
+    switch (opcode) {
+        case op_enter: {
+            printLocationAndOp(out, exec, location, it, "enter");
+            break;
+        }
+        case op_get_scope: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
+            break;
+        }
+        case op_create_direct_arguments: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_direct_arguments");
+            out.printf("%s", registerName(r0).data());
+            break;
+        }
+        case op_create_scoped_arguments: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+            break;
+        }
+        case op_create_cloned_arguments: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_cloned_arguments");
+            out.printf("%s", registerName(r0).data());
+            break;
+        }
+        case op_argument_count: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "argument_count", r0);
+            break;
+        }
+        case op_get_argument: {
+            int r0 = (++it)->u.operand;
+            int index = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "argument", r0);
+            out.printf(", %d", index);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_create_rest: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            unsigned argumentOffset = (++it)->u.unsignedValue;
+            printLocationAndOp(out, exec, location, it, "create_rest");
+            out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
+            out.printf("ArgumentsOffset: %u", argumentOffset);
+            break;
+        }
+        case op_get_rest_length: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_rest_length");
+            out.printf("%s, ", registerName(r0).data());
+            unsigned argumentOffset = (++it)->u.unsignedValue;
+            out.printf("ArgumentsOffset: %u", argumentOffset);
+            break;
+        }
+        case op_create_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            unsigned inferredInlineCapacity = (++it)->u.operand;
+            unsigned cachedFunction = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_this");
+            out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
+            break;
+        }
+        case op_to_this: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
+            Structure* structure = (++it)->u.structure.get();
+            if (structure)
+                out.print(", cache(struct = ", RawPointer(structure), ")");
+            out.print(", ", (++it)->u.toThisStatus);
+            break;
+        }
+        case op_check_tdz: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
+            break;
+        }
+        case op_new_object: {
+            int r0 = (++it)->u.operand;
+            unsigned inferredInlineCapacity = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_object");
+            out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
+            ++it; // Skip object allocation profile.
+            break;
+        }
+        case op_new_array: {
+            int dst = (++it)->u.operand;
+            int argv = (++it)->u.operand;
+            int argc = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_array");
+            out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
+            ++it; // Skip array allocation profile.
+            break;
+        }
+        case op_new_array_with_spread: {
+            int dst = (++it)->u.operand;
+            int argv = (++it)->u.operand;
+            int argc = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_array_with_spread");
+            out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc);
+            unsigned bitVectorIndex = (++it)->u.unsignedValue;
+            const BitVector& bitVector = m_unlinkedCode->bitVector(bitVectorIndex);
+            out.print("BitVector:", bitVectorIndex, ":");
+            for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) {
+                if (bitVector.get(i))
+                    out.print("1");
+                else
+                    out.print("0");
+            }
+            break;
+        }
+        case op_spread: {
+            int dst = (++it)->u.operand;
+            int arg = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "spread");
+            out.printf("%s, %s", registerName(dst).data(), registerName(arg).data());
+            break;
+        }
+        case op_new_array_with_size: {
+            int dst = (++it)->u.operand;
+            int length = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_array_with_size");
+            out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
+            ++it; // Skip array allocation profile.
+            break;
+        }
+        case op_new_array_buffer: {
+            int dst = (++it)->u.operand;
+            int argv = (++it)->u.operand;
+            int argc = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_array_buffer");
+            out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
+            ++it; // Skip array allocation profile.
+            break;
+        }
+        case op_new_regexp: {
+            int r0 = (++it)->u.operand;
+            int re0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_regexp");
+            out.printf("%s, ", registerName(r0).data());
+            if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
+                out.printf("%s", regexpName(re0, regexp(re0)).data());
+            else
+                out.printf("bad_regexp(%d)", re0);
+            break;
+        }
+        case op_mov: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "mov");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+            break;
+        }
+        case op_profile_type: {
+            int r0 = (++it)->u.operand;
+            ++it;
+            ++it;
+            ++it;
+            ++it;
+            printLocationAndOp(out, exec, location, it, "op_profile_type");
+            out.printf("%s", registerName(r0).data());
+            break;
+        }
+        case op_profile_control_flow: {
+            BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
+            printLocationAndOp(out, exec, location, it, "profile_control_flow");
+            out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
+            break;
+        }
+        case op_not: {
+            printUnaryOp(out, exec, location, it, "not");
+            break;
+        }
+        case op_eq: {
+            printBinaryOp(out, exec, location, it, "eq");
+            break;
+        }
+        case op_eq_null: {
+            printUnaryOp(out, exec, location, it, "eq_null");
+            break;
+        }
+        case op_neq: {
+            printBinaryOp(out, exec, location, it, "neq");
+            break;
+        }
+        case op_neq_null: {
+            printUnaryOp(out, exec, location, it, "neq_null");
+            break;
+        }
+        case op_stricteq: {
+            printBinaryOp(out, exec, location, it, "stricteq");
+            break;
+        }
+        case op_nstricteq: {
+            printBinaryOp(out, exec, location, it, "nstricteq");
+            break;
+        }
+        case op_less: {
+            printBinaryOp(out, exec, location, it, "less");
+            break;
+        }
+        case op_lesseq: {
+            printBinaryOp(out, exec, location, it, "lesseq");
+            break;
+        }
+        case op_greater: {
+            printBinaryOp(out, exec, location, it, "greater");
+            break;
+        }
+        case op_greatereq: {
+            printBinaryOp(out, exec, location, it, "greatereq");
+            break;
+        }
+        case op_inc: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
+            break;
+        }
+        case op_dec: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
+            break;
+        }
+        case op_to_number: {
+            printUnaryOp(out, exec, location, it, "to_number");
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_to_string: {
+            printUnaryOp(out, exec, location, it, "to_string");
+            break;
+        }
+        case op_negate: {
+            printUnaryOp(out, exec, location, it, "negate");
+            ++it; // op_negate has an extra operand for the ArithProfile.
+            break;
+        }
+        case op_add: {
+            printBinaryOp(out, exec, location, it, "add");
+            ++it;
+            break;
+        }
+        case op_mul: {
+            printBinaryOp(out, exec, location, it, "mul");
+            ++it;
+            break;
+        }
+        case op_div: {
+            printBinaryOp(out, exec, location, it, "div");
+            ++it;
+            break;
+        }
+        case op_mod: {
+            printBinaryOp(out, exec, location, it, "mod");
+            break;
+        }
+        case op_pow: {
+            printBinaryOp(out, exec, location, it, "pow");
+            break;
+        }
+        case op_sub: {
+            printBinaryOp(out, exec, location, it, "sub");
+            ++it;
+            break;
+        }
+        case op_lshift: {
+            printBinaryOp(out, exec, location, it, "lshift");
+            break;            
+        }
+        case op_rshift: {
+            printBinaryOp(out, exec, location, it, "rshift");
+            break;
+        }
+        case op_urshift: {
+            printBinaryOp(out, exec, location, it, "urshift");
+            break;
+        }
+        case op_bitand: {
+            printBinaryOp(out, exec, location, it, "bitand");
+            ++it;
+            break;
+        }
+        case op_bitxor: {
+            printBinaryOp(out, exec, location, it, "bitxor");
+            ++it;
+            break;
+        }
+        case op_bitor: {
+            printBinaryOp(out, exec, location, it, "bitor");
+            ++it;
+            break;
+        }
+        case op_overrides_has_instance: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "overrides_has_instance");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+            break;
+        }
+        case op_instanceof: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "instanceof");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+            break;
+        }
+        case op_instanceof_custom: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "instanceof_custom");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+            break;
+        }
+        case op_unsigned: {
+            printUnaryOp(out, exec, location, it, "unsigned");
+            break;
+        }
+        case op_typeof: {
+            printUnaryOp(out, exec, location, it, "typeof");
+            break;
+        }
+        case op_is_empty: {
+            printUnaryOp(out, exec, location, it, "is_empty");
+            break;
+        }
+        case op_is_undefined: {
+            printUnaryOp(out, exec, location, it, "is_undefined");
+            break;
+        }
+        case op_is_boolean: {
+            printUnaryOp(out, exec, location, it, "is_boolean");
+            break;
+        }
+        case op_is_number: {
+            printUnaryOp(out, exec, location, it, "is_number");
+            break;
+        }
+        case op_is_cell_with_type: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int type = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "is_cell_with_type");
+            out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type);
+            break;
+        }
+        case op_is_object: {
+            printUnaryOp(out, exec, location, it, "is_object");
+            break;
+        }
+        case op_is_object_or_null: {
+            printUnaryOp(out, exec, location, it, "is_object_or_null");
+            break;
+        }
+        case op_is_function: {
+            printUnaryOp(out, exec, location, it, "is_function");
+            break;
+        }
+        case op_in: {
+            printBinaryOp(out, exec, location, it, "in");
+            break;
+        }
+        case op_try_get_by_id: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "try_get_by_id");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_get_by_id:
+        case op_get_by_id_proto_load:
+        case op_get_by_id_unset:
+        case op_get_array_length: {
+            printGetByIdOp(out, exec, location, it);
+            printGetByIdCacheStatus(out, exec, location, stubInfos);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_get_by_id_with_this: {
+            printLocationAndOp(out, exec, location, it, "get_by_id_with_this");
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data());
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_get_by_val_with_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_by_val_with_this");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_put_by_id: {
+            printPutByIdOp(out, exec, location, it, "put_by_id");
+            printPutByIdCacheStatus(out, location, stubInfos);
+            break;
+        }
+        case op_put_by_id_with_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_id_with_this");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data());
+            break;
+        }
+        case op_put_by_val_with_this: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_val_with_this");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+            break;
+        }
+        case op_put_getter_by_id: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_getter_by_id");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
+            break;
+        }
+        case op_put_setter_by_id: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_setter_by_id");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
+            break;
+        }
+        case op_put_getter_setter_by_id: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
+            out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
+            break;
+        }
+        case op_put_getter_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_getter_by_val");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
+            break;
+        }
+        case op_put_setter_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int n0 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_setter_by_val");
+            out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
+            break;
+        }
+        case op_define_data_property: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "define_data_property");
+            out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
+            break;
+        }
+        case op_define_accessor_property: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            int r3 = (++it)->u.operand;
+            int r4 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "define_accessor_property");
+            out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data());
+            break;
+        }
+        case op_del_by_id: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "del_by_id");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
+            break;
+        }
+        case op_get_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_by_val");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+            dumpArrayProfiling(out, it, hasPrintedProfiling);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_put_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_val");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+            dumpArrayProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_put_by_val_direct: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_val_direct");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+            dumpArrayProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_del_by_val: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int r2 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "del_by_val");
+            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
+            break;
+        }
+        case op_put_by_index: {
+            int r0 = (++it)->u.operand;
+            unsigned n0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_by_index");
+            out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
+            break;
+        }
+        case op_jmp: {
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jmp");
+            out.printf("%d(->%d)", offset, location + offset);
+            break;
+        }
+        case op_jtrue: {
+            printConditionalJump(out, exec, begin, it, location, "jtrue");
+            break;
+        }
+        case op_jfalse: {
+            printConditionalJump(out, exec, begin, it, location, "jfalse");
+            break;
+        }
+        case op_jeq_null: {
+            printConditionalJump(out, exec, begin, it, location, "jeq_null");
+            break;
+        }
+        case op_jneq_null: {
+            printConditionalJump(out, exec, begin, it, location, "jneq_null");
+            break;
+        }
+        case op_jneq_ptr: {
+            int r0 = (++it)->u.operand;
+            Special::Pointer pointer = (++it)->u.specialPointer;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jneq_ptr");
+            out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
+            ++it;
+            break;
+        }
+        case op_jless: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jless");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jlesseq: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jlesseq");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jgreater: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jgreater");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jgreatereq: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jgreatereq");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jnless: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jnless");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jnlesseq: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jnlesseq");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jngreater: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jngreater");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_jngreatereq: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "jngreatereq");
+            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
+            break;
+        }
+        case op_loop_hint: {
+            printLocationAndOp(out, exec, location, it, "loop_hint");
+            break;
+        }
+        case op_watchdog: {
+            printLocationAndOp(out, exec, location, it, "watchdog");
+            break;
+        }
+        case op_log_shadow_chicken_prologue: {
+            int r0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "log_shadow_chicken_prologue");
+            out.printf("%s", registerName(r0).data());
+            break;
+        }
+        case op_log_shadow_chicken_tail: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "log_shadow_chicken_tail");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+            break;
+        }
+        case op_switch_imm: {
+            int tableIndex = (++it)->u.operand;
+            int defaultTarget = (++it)->u.operand;
+            int scrutineeRegister = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "switch_imm");
+            out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
+            break;
+        }
+        case op_switch_char: {
+            int tableIndex = (++it)->u.operand;
+            int defaultTarget = (++it)->u.operand;
+            int scrutineeRegister = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "switch_char");
+            out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
+            break;
+        }
+        case op_switch_string: {
+            int tableIndex = (++it)->u.operand;
+            int defaultTarget = (++it)->u.operand;
+            int scrutineeRegister = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "switch_string");
+            out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
+            break;
+        }
+        case op_new_func: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_func");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_generator_func: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_generator_func");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_async_func: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_async_func");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_func_exp: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_func_exp");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_generator_func_exp: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_new_async_func_exp: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int f0 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "new_async_func_exp");
+            out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
+            break;
+        }
+        case op_set_function_name: {
+            int funcReg = (++it)->u.operand;
+            int nameReg = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "set_function_name");
+            out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data());
+            break;
+        }
+        case op_call: {
+            printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+            break;
+        }
+        case op_tail_call: {
+            printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
+            break;
+        }
+        case op_call_eval: {
+            printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
+            break;
+        }
+            
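+        // The four varargs call forms below share one operand layout, so they are decoded
+        // together and only the printed mnemonic differs.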
+        case op_construct_varargs:
+        case op_call_varargs:
+        case op_tail_call_varargs:
+        case op_tail_call_forward_arguments: {
+            int result = (++it)->u.operand;
+            int callee = (++it)->u.operand;
+            int thisValue = (++it)->u.operand;
+            int arguments = (++it)->u.operand;
+            int firstFreeRegister = (++it)->u.operand;
+            int varArgOffset = (++it)->u.operand;
+            ++it;
+            const char* opName;
+            if (opcode == op_call_varargs)
+                opName = "call_varargs";
+            else if (opcode == op_construct_varargs)
+                opName = "construct_varargs";
+            else if (opcode == op_tail_call_varargs)
+                opName = "tail_call_varargs";
+            else if (opcode == op_tail_call_forward_arguments)
+                opName = "tail_call_forward_arguments";
+            else
+                RELEASE_ASSERT_NOT_REACHED();
+
+            printLocationAndOp(out, exec, location, it, opName);
+            out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+
+        case op_ret: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
+            break;
+        }
+        case op_construct: {
+            printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
+            break;
+        }
+        case op_strcat: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int count = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "strcat");
+            out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
+            break;
+        }
+        case op_to_primitive: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "to_primitive");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+            break;
+        }
+        case op_get_enumerable_length: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
+            out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+            it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
+            break;
+        }
+        case op_has_indexed_property: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            ArrayProfile* arrayProfile = it[4].u.arrayProfile;
+            printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
+            out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
+            it += OPCODE_LENGTH(op_has_indexed_property) - 1;
+            break;
+        }
+        case op_has_structure_property: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            int enumerator = it[4].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_has_structure_property");
+            out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
+            it += OPCODE_LENGTH(op_has_structure_property) - 1;
+            break;
+        }
+        case op_has_generic_property: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_has_generic_property");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
+            it += OPCODE_LENGTH(op_has_generic_property) - 1;
+            break;
+        }
+        case op_get_direct_pname: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            int propertyName = it[3].u.operand;
+            int index = it[4].u.operand;
+            int enumerator = it[5].u.operand;
+            ValueProfile* profile = it[6].u.profile;
+            printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
+            out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
+            it += OPCODE_LENGTH(op_get_direct_pname) - 1;
+            break;
+
+        }
+        case op_get_property_enumerator: {
+            int dst = it[1].u.operand;
+            int base = it[2].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
+            out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
+            it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
+            break;
+        }
+        case op_enumerator_structure_pname: {
+            int dst = it[1].u.operand;
+            int enumerator = it[2].u.operand;
+            int index = it[3].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+            it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
+            break;
+        }
+        case op_enumerator_generic_pname: {
+            int dst = it[1].u.operand;
+            int enumerator = it[2].u.operand;
+            int index = it[3].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
+            it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
+            break;
+        }
+        case op_to_index_string: {
+            int dst = it[1].u.operand;
+            int index = it[2].u.operand;
+            printLocationAndOp(out, exec, location, it, "op_to_index_string");
+            out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
+            it += OPCODE_LENGTH(op_to_index_string) - 1;
+            break;
+        }
+        case op_push_with_scope: {
+            int dst = (++it)->u.operand;
+            int newScope = (++it)->u.operand;
+            int currentScope = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "push_with_scope");
+            out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
+            break;
+        }
+        case op_get_parent_scope: {
+            int dst = (++it)->u.operand;
+            int parentScope = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_parent_scope");
+            out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
+            break;
+        }
+        case op_create_lexical_environment: {
+            int dst = (++it)->u.operand;
+            int scope = (++it)->u.operand;
+            int symbolTable = (++it)->u.operand;
+            int initialValue = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "create_lexical_environment");
+            out.printf("%s, %s, %s, %s", 
+                registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
+            break;
+        }
+        case op_catch: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "catch");
+            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+            break;
+        }
+        case op_throw: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
+            break;
+        }
+        case op_throw_static_error: {
+            int k0 = (++it)->u.operand;
+            ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue);
+            printLocationAndOp(out, exec, location, it, "throw_static_error");
+            out.printf("%s, ", constantName(k0).data());
+            out.print(k1);
+            break;
+        }
+        case op_debug: {
+            int debugHookType = (++it)->u.operand;
+            int hasBreakpointFlag = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "debug");
+            out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag);
+            break;
+        }
+        case op_assert: {
+            int condition = (++it)->u.operand;
+            int line = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "assert");
+            out.printf("%s, %d", registerName(condition).data(), line);
+            break;
+        }
+        case op_end: {
+            int r0 = (++it)->u.operand;
+            printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
+            break;
+        }
+        case op_resolve_scope: {
+            int r0 = (++it)->u.operand;
+            int scope = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
+            int depth = (++it)->u.operand;
+            void* pointer = (++it)->u.pointer;
+            printLocationAndOp(out, exec, location, it, "resolve_scope");
+            out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
+            break;
+        }
+        case op_get_from_scope: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
+            ++it; // Structure
+            int operand = (++it)->u.operand; // Operand
+            printLocationAndOp(out, exec, location, it, "get_from_scope");
+            out.print(registerName(r0), ", ", registerName(r1));
+            if (static_cast<unsigned>(id0) == UINT_MAX)
+                out.print(", anonymous");
+            else
+                out.print(", ", idName(id0, identifier(id0)));
+            out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_put_to_scope: {
+            int r0 = (++it)->u.operand;
+            int id0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
+            ++it; // Structure
+            int operand = (++it)->u.operand; // Operand
+            printLocationAndOp(out, exec, location, it, "put_to_scope");
+            out.print(registerName(r0));
+            if (static_cast<unsigned>(id0) == UINT_MAX)
+                out.print(", anonymous");
+            else
+                out.print(", ", idName(id0, identifier(id0)));
+            out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, , ", operand);
+            break;
+        }
+        case op_get_from_arguments: {
+            int r0 = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "get_from_arguments");
+            out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
+            dumpValueProfiling(out, it, hasPrintedProfiling);
+            break;
+        }
+        case op_put_to_arguments: {
+            int r0 = (++it)->u.operand;
+            int offset = (++it)->u.operand;
+            int r1 = (++it)->u.operand;
+            printLocationAndOp(out, exec, location, it, "put_to_arguments");
+            out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
+            break;
+        }
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+    }
+
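+    // Append any profiling data (rare case, arithmetic, and DFG exit sites) recorded for this bytecode offset.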
+    dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
+    {
+        dumpArithProfile(out, arithProfileForBytecodeOffset(location), hasPrintedProfiling);
+    }
+    
+#if ENABLE(DFG_JIT)
+    Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
+    if (!exitSites.isEmpty()) {
+        out.print(" !! frequent exits: ");
+        CommaPrinter comma;
+        for (unsigned i = 0; i < exitSites.size(); ++i)
+            out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
+    }
+#else // ENABLE(DFG_JIT)
+    UNUSED_PARAM(location);
+#endif // ENABLE(DFG_JIT)
+    out.print("\n");
+}
+
+void CodeBlock::dumpBytecode(
+    PrintStream& out, unsigned bytecodeOffset,
+    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
+{
+    ExecState* exec = m_globalObject->globalExec();
+    const Instruction* it = instructions().begin() + bytecodeOffset;
+    dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
+}
+
+#define FOR_EACH_MEMBER_VECTOR(macro) \
+    macro(instructions) \
+    macro(callLinkInfos) \
+    macro(linkedCallerList) \
+    macro(identifiers) \
+    macro(functionExpressions) \
+    macro(constantRegisters)
+
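+// Approximate heap footprint of a Vector's backing store: capacity (not size) times element size.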
+template<typename T>
+static size_t sizeInBytes(const Vector<T>& vector)
+{
+    return vector.capacity() * sizeof(T);
+}
+
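+// FireDetail used further below when linking a put_to_scope invalidates a closure variable's
+// watchpoint; it exists only to produce a descriptive message for watchpoint logging.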
+namespace {
+
+class PutToScopeFireDetail : public FireDetail {
+public:
+    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
+        : m_codeBlock(codeBlock)
+        , m_ident(ident)
+    {
+    }
+    
+    void dump(PrintStream& out) const override
+    {
+        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast(m_codeBlock->ownerExecutable())), " for ", m_ident);
+    }
+    
+private:
+    CodeBlock* m_codeBlock;
+    const Identifier& m_ident;
+};
+
+} // anonymous namespace
+
+CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
+    : JSCell(*vm, structure)
+    , m_globalObject(other.m_globalObject)
+    , m_numCalleeLocals(other.m_numCalleeLocals)
+    , m_numVars(other.m_numVars)
+    , m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+    , m_didFailJITCompilation(false)
+    , m_didFailFTLCompilation(false)
+    , m_hasBeenCompiledWithFTL(false)
+    , m_isConstructor(other.m_isConstructor)
+    , m_isStrictMode(other.m_isStrictMode)
+    , m_codeType(other.m_codeType)
+    , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
+    , m_hasDebuggerStatement(false)
+    , m_steppingMode(SteppingModeDisabled)
+    , m_numBreakpoints(0)
+    , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
+    , m_vm(other.m_vm)
+    , m_instructions(other.m_instructions)
+    , m_thisRegister(other.m_thisRegister)
+    , m_scopeRegister(other.m_scopeRegister)
+    , m_hash(other.m_hash)
+    , m_source(other.m_source)
+    , m_sourceOffset(other.m_sourceOffset)
+    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
+    , m_constantRegisters(other.m_constantRegisters)
+    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
+    , m_functionDecls(other.m_functionDecls)
+    , m_functionExprs(other.m_functionExprs)
+    , m_osrExitCounter(0)
+    , m_optimizationDelayCounter(0)
+    , m_reoptimizationRetryCounter(0)
+    , m_creationTime(std::chrono::steady_clock::now())
+{
+    m_visitWeaklyHasBeenCalled = false;
+
+    ASSERT(heap()->isDeferred());
+    ASSERT(m_scopeRegister.isLocal());
+
+    setNumParameters(other.numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
+{
+    Base::finishCreation(vm);
+
+    optimizeAfterWarmUp();
+    jitAfterWarmUp();
+
+    if (other.m_rareData) {
+        createRareDataIfNecessary();
+        
+        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
+        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
+        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
+        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
+    }
+    
+    heap()->m_codeBlocks->add(this);
+}
+
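+// Constructs a linked CodeBlock from an UnlinkedCodeBlock; the bulk of the linking work
+// (constants, function executables, jump tables, instruction translation) happens in finishCreation().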
+CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+    JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+    : JSCell(*vm, structure)
+    , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
+    , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
+    , m_numVars(unlinkedCodeBlock->m_numVars)
+    , m_shouldAlwaysBeInlined(true)
+#if ENABLE(JIT)
+    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
+#endif
+    , m_didFailJITCompilation(false)
+    , m_didFailFTLCompilation(false)
+    , m_hasBeenCompiledWithFTL(false)
+    , m_isConstructor(unlinkedCodeBlock->isConstructor())
+    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
+    , m_codeType(unlinkedCodeBlock->codeType())
+    , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
+    , m_hasDebuggerStatement(false)
+    , m_steppingMode(SteppingModeDisabled)
+    , m_numBreakpoints(0)
+    , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
+    , m_vm(unlinkedCodeBlock->vm())
+    , m_thisRegister(unlinkedCodeBlock->thisRegister())
+    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
+    , m_source(sourceProvider)
+    , m_sourceOffset(sourceOffset)
+    , m_firstLineColumnOffset(firstLineColumnOffset)
+    , m_osrExitCounter(0)
+    , m_optimizationDelayCounter(0)
+    , m_reoptimizationRetryCounter(0)
+    , m_creationTime(std::chrono::steady_clock::now())
+{
+    m_visitWeaklyHasBeenCalled = false;
+
+    ASSERT(heap()->isDeferred());
+    ASSERT(m_scopeRegister.isLocal());
+
+    ASSERT(m_source);
+    setNumParameters(unlinkedCodeBlock->numParameters());
+}
+
+void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
+    JSScope* scope)
+{
+    Base::finishCreation(vm);
+
+    if (vm.typeProfiler() || vm.controlFlowProfiler())
+        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
+
+    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
+    if (unlinkedCodeBlock->usesGlobalObject())
+        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
+
+    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
+        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
+        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
+            m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
+    }
+
+    // We already have the cloned symbol table for the module environment since we need to instantiate
+    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
+    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(unlinkedCodeBlock)) {
+        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
+        if (m_vm->typeProfiler()) {
+            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
+            clonedSymbolTable->prepareForTypeProfiling(locker);
+        }
+        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
+    }
+
+    bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
+    m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
+    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
+        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
+        if (shouldUpdateFunctionHasExecutedCache)
+            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+        m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+    }
+
+    m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
+    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
+        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
+        if (shouldUpdateFunctionHasExecutedCache)
+            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
+        m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
+    }
+
+    if (unlinkedCodeBlock->hasRareData()) {
+        createRareDataIfNecessary();
+        if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
+            m_rareData->m_constantBuffers.grow(count);
+            for (size_t i = 0; i < count; i++) {
+                const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
+                m_rareData->m_constantBuffers[i] = buffer;
+            }
+        }
+        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
+            m_rareData->m_exceptionHandlers.resizeToFit(count);
+            for (size_t i = 0; i < count; i++) {
+                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
+                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+#if ENABLE(JIT)
+                handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
+#else
+                handler.initialize(unlinkedHandler);
+#endif
+            }
+        }
+
+        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
+            m_rareData->m_stringSwitchJumpTables.grow(count);
+            for (size_t i = 0; i < count; i++) {
+                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
+                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
+                for (; ptr != end; ++ptr) {
+                    OffsetLocation offset;
+                    offset.branchOffset = ptr->value.branchOffset;
+                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
+                }
+            }
+        }
+
+        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
+            m_rareData->m_switchJumpTables.grow(count);
+            for (size_t i = 0; i < count; i++) {
+                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
+                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
+                destTable.branchOffsets = sourceTable.branchOffsets;
+                destTable.min = sourceTable.min;
+            }
+        }
+    }
+
+    // Allocate metadata buffers for the bytecode
+    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
+        m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
+    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
+        m_arrayProfiles.grow(size);
+    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
+        m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
+    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
+        m_valueProfiles = RefCountedArray<ValueProfile>(size);
+    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
+        m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
+
+#if ENABLE(JIT)
+    setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
+#endif
+
+    // Copy and translate the UnlinkedInstructions
+    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
+    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
+
+    // Bookkeep the strongly referenced module environments.
+    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
+
+    RefCountedArray<Instruction> instructions(instructionCount);
+
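+    // Hands out ValueProfiles in bytecode order: each profiled opcode reserves its final
+    // operand slot, which is patched here to point at the corresponding ValueProfile.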
+    unsigned valueProfileCount = 0;
+    auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
+        unsigned valueProfileIndex = valueProfileCount++;
+        ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
+        ASSERT(profile->m_bytecodeOffset == -1);
+        profile->m_bytecodeOffset = bytecodeOffset;
+        instructions[bytecodeOffset + opLength - 1] = profile;
+    };
+
+    for (unsigned i = 0; !instructionReader.atEnd(); ) {
+        const UnlinkedInstruction* pc = instructionReader.next();
+
+        unsigned opLength = opcodeLength(pc[0].u.opcode);
+
+        instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
+        for (size_t j = 1; j < opLength; ++j) {
+            if (sizeof(int32_t) != sizeof(intptr_t))
+                instructions[i + j].u.pointer = 0;
+            instructions[i + j].u.operand = pc[j].u.operand;
+        }
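+        // Patch opcode-specific metadata slots (profiles, call link infos, scope resolution)
+        // now that the raw operands have been copied.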
+        switch (pc[0].u.opcode) {
+        case op_has_indexed_property: {
+            int arrayProfileIndex = pc[opLength - 1].u.operand;
+            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+            break;
+        }
+        case op_call_varargs:
+        case op_tail_call_varargs:
+        case op_tail_call_forward_arguments:
+        case op_construct_varargs:
+        case op_get_by_val: {
+            int arrayProfileIndex = pc[opLength - 2].u.operand;
+            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+
+            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
+            FALLTHROUGH;
+        }
+        case op_get_direct_pname:
+        case op_get_by_id:
+        case op_get_by_id_with_this:
+        case op_try_get_by_id:
+        case op_get_by_val_with_this:
+        case op_get_from_arguments:
+        case op_to_number:
+        case op_get_argument: {
+            linkValueProfile(i, opLength);
+            break;
+        }
+        case op_put_by_val: {
+            int arrayProfileIndex = pc[opLength - 1].u.operand;
+            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+            break;
+        }
+        case op_put_by_val_direct: {
+            int arrayProfileIndex = pc[opLength - 1].u.operand;
+            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
+            break;
+        }
+
+        case op_new_array:
+        case op_new_array_buffer:
+        case op_new_array_with_size: {
+            int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
+            instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
+            break;
+        }
+        case op_new_object: {
+            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
+            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
+            int inferredInlineCapacity = pc[opLength - 2].u.operand;
+
+            instructions[i + opLength - 1] = objectAllocationProfile;
+            objectAllocationProfile->initialize(vm,
+                m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
+            break;
+        }
+
+        case op_call:
+        case op_tail_call:
+        case op_call_eval: {
+            linkValueProfile(i, opLength);
+            int arrayProfileIndex = pc[opLength - 2].u.operand;
+            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
+            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
+            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
+            break;
+        }
+        case op_construct: {
+            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
+            linkValueProfile(i, opLength);
+            break;
+        }
+        case op_get_array_length:
+            CRASH();
+
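+        // resolve_scope is resolved eagerly at link time; the resolution type, depth and,
+        // where possible, the scope cell or symbol table are cached in the instruction stream.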
+        case op_resolve_scope: {
+            const Identifier& ident = identifier(pc[3].u.operand);
+            ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
+            RELEASE_ASSERT(type != LocalClosureVar);
+            int localScopeDepth = pc[5].u.operand;
+
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+            instructions[i + 4].u.operand = op.type;
+            instructions[i + 5].u.operand = op.depth;
+            if (op.lexicalEnvironment) {
+                if (op.type == ModuleVar) {
+                    // Keep the linked module environment strongly referenced.
+                    if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
+                        addConstant(op.lexicalEnvironment);
+                    instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
+                } else
+                    instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
+            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
+                instructions[i + 6].u.jsCell.set(vm, this, constantScope);
+            else
+                instructions[i + 6].u.pointer = nullptr;
+            break;
+        }
+
+        case op_get_from_scope: {
+            linkValueProfile(i, opLength);
+
+            // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
+
+            int localScopeDepth = pc[5].u.operand;
+            instructions[i + 5].u.pointer = nullptr;
+
+            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+            ASSERT(!isInitialization(getPutInfo.initializationMode()));
+            if (getPutInfo.resolveType() == LocalClosureVar) {
+                instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+                break;
+            }
+
+            const Identifier& ident = identifier(pc[3].u.operand);
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
+
+            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+            if (op.type == ModuleVar)
+                instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
+            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
+                instructions[i + 5].u.watchpointSet = op.watchpointSet;
+            else if (op.structure)
+                instructions[i + 5].u.structure.set(vm, this, op.structure);
+            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+            break;
+        }
+
+        case op_put_to_scope: {
+            // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
+            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+            if (getPutInfo.resolveType() == LocalClosureVar) {
+                // Only do watching if the property we're putting to is not anonymous.
+                if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
+                    int symbolTableIndex = pc[5].u.operand;
+                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+                    const Identifier& ident = identifier(pc[2].u.operand);
+                    ConcurrentJSLocker locker(symbolTable->m_lock);
+                    auto iter = symbolTable->find(locker, ident.impl());
+                    ASSERT(iter != symbolTable->end(locker));
+                    iter->value.prepareToWatch();
+                    instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
+                } else
+                    instructions[i + 5].u.watchpointSet = nullptr;
+                break;
+            }
+
+            const Identifier& ident = identifier(pc[2].u.operand);
+            int localScopeDepth = pc[5].u.operand;
+            instructions[i + 5].u.pointer = nullptr;
+            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
+
+            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
+            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
+                instructions[i + 5].u.watchpointSet = op.watchpointSet;
+            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
+                if (op.watchpointSet)
+                    op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
+            } else if (op.structure)
+                instructions[i + 5].u.structure.set(vm, this, op.structure);
+            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
+
+            break;
+        }
+
+        case op_profile_type: {
+            RELEASE_ASSERT(vm.typeProfiler());
+            // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
+            size_t instructionOffset = i + opLength - 1;
+            unsigned divotStart, divotEnd;
+            GlobalVariableID globalVariableID = 0;
+            RefPtr<TypeSet> globalTypeSet;
+            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
+            VirtualRegister profileRegister(pc[1].u.operand);
+            ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
+            SymbolTable* symbolTable = nullptr;
+
+            switch (flag) {
+            case ProfileTypeBytecodeClosureVar: {
+                const Identifier& ident = identifier(pc[4].u.operand);
+                int localScopeDepth = pc[2].u.operand;
+                ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
+                // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
+                // we're abstractly "read"ing from a JSScope.
+                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
+
+                if (op.type == ClosureVar || op.type == ModuleVar)
+                    symbolTable = op.lexicalEnvironment->symbolTable();
+                else if (op.type == GlobalVar)
+                    symbolTable = m_globalObject.get()->symbolTable();
+
+                UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
+                if (symbolTable) {
+                    ConcurrentJSLocker locker(symbolTable->m_lock);
+                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+                    symbolTable->prepareForTypeProfiling(locker);
+                    globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
+                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
+                } else
+                    globalVariableID = TypeProfilerNoGlobalIDExists;
+
+                break;
+            }
+            case ProfileTypeBytecodeLocallyResolved: {
+                int symbolTableIndex = pc[2].u.operand;
+                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
+                const Identifier& ident = identifier(pc[4].u.operand);
+                ConcurrentJSLocker locker(symbolTable->m_lock);
+                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
+                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
+                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
+
+                break;
+            }
+            case ProfileTypeBytecodeDoesNotHaveGlobalID: 
+            case ProfileTypeBytecodeFunctionArgument: {
+                globalVariableID = TypeProfilerNoGlobalIDExists;
+                break;
+            }
+            case ProfileTypeBytecodeFunctionReturnStatement: {
+                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
+                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
+                globalVariableID = TypeProfilerReturnStatement;
+                if (!shouldAnalyze) {
+                    // Because a return statement can be added implicitly to return undefined at the end of a function,
+                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
+                    // the user's program, give the type profiler some range to identify these return statements.
+                    // Currently, the text offset that is used as identification is "f" in the function keyword
+                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
+                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
+                    shouldAnalyze = true;
+                }
+                break;
+            }
+            }
+
+            std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
+                ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, &vm);
+            TypeLocation* location = locationPair.first;
+            bool isNewLocation = locationPair.second;
+
+            if (flag == ProfileTypeBytecodeFunctionReturnStatement)
+                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
+
+            if (shouldAnalyze && isNewLocation)
+                vm.typeProfiler()->insertNewLocation(location);
+
+            instructions[i + 2].u.location = location;
+            break;
+        }
+
+        case op_debug: {
+            if (pc[1].u.index == DidReachBreakpoint)
+                m_hasDebuggerStatement = true;
+            break;
+        }
+
+        case op_create_rest: {
+            int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
+            ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
+            ASSERT_WITH_MESSAGE(numberOfArgumentsToSkip == numParameters() - 1, "We assume that this is true when rematerializing the rest parameter during OSR exit in the FTL JIT.");
+            break;
+        }
+
+        default:
+            break;
+        }
+        i += opLength;
+    }
+
+    if (vm.controlFlowProfiler())
+        insertBasicBlockBoundariesForControlFlowProfiler(instructions);
+
+    m_instructions = WTFMove(instructions);
+
+    // Set optimization thresholds only after m_instructions is initialized, since these
+    // rely on the instruction count (and are in theory permitted to also inspect the
+    // instruction stream to more accurately assess the cost of tier-up).
+    optimizeAfterWarmUp();
+    jitAfterWarmUp();
+
+    // If the concurrent thread will want the code block's hash, then compute it here
+    // synchronously.
+    if (Options::alwaysComputeHash())
+        hash();
+
+    if (Options::dumpGeneratedBytecodes())
+        dumpBytecode();
+    
+    heap()->m_codeBlocks->add(this);
+    heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
+}
+
+CodeBlock::~CodeBlock()
+{
+    if (m_vm->m_perBytecodeProfiler)
+        m_vm->m_perBytecodeProfiler->notifyDestruction(this);
+
+    if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
+        unlinkedCodeBlock()->setDidOptimize(FalseTriState);
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+    dumpValueProfiles();
+#endif
+
+    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
+    // Consider that two CodeBlocks become unreachable at the same time. There
+    // is no guarantee about the order in which the CodeBlocks are destroyed.
+    // So, if we don't remove incoming calls, and get destroyed before the
+    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
+    // destructor will try to remove nodes from our (no longer valid) linked list.
+    unlinkIncomingCalls();
+    
+    // Note that our outgoing calls will be removed from other CodeBlocks'
+    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
+    // destructors.
+
+#if ENABLE(JIT)
+    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+        StructureStubInfo* stub = *iter;
+        stub->aboutToDie();
+        stub->deref();
+    }
+#endif // ENABLE(JIT)
+}
+
+void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
+{
+    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
+    size_t count = constants.size();
+    m_constantRegisters.resizeToFit(count);
+    bool hasTypeProfiler = !!m_vm->typeProfiler();
+    for (size_t i = 0; i < count; i++) {
+        JSValue constant = constants[i].get();
+
+        if (!constant.isEmpty()) {
+            if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(constant)) {
+                if (hasTypeProfiler) {
+                    ConcurrentJSLocker locker(symbolTable->m_lock);
+                    symbolTable->prepareForTypeProfiling(locker);
+                }
+
+                SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
+                if (wasCompiledWithDebuggingOpcodes())
+                    clone->setRareDataCodeBlock(this);
+
+                constant = clone;
+            }
+        }
+
+        m_constantRegisters[i].set(*m_vm, this, constant);
+    }
+
+    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
+}
+
+void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
+{
+    m_alternative.set(vm, this, alternative);
+}
+
+void CodeBlock::setNumParameters(int newValue)
+{
+    m_numParameters = newValue;
+
+    m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
+}
+
+CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
+{
+#if ENABLE(FTL_JIT)
+    if (jitType() != JITCode::DFGJIT)
+        return 0;
+    DFG::JITCode* jitCode = m_jitCode->dfg();
+    return jitCode->osrEntryBlock();
+#else // ENABLE(FTL_JIT)
+    return 0;
+#endif // ENABLE(FTL_JIT)
+}
+
+void CodeBlock::visitWeakly(SlotVisitor& visitor)
+{
+    ConcurrentJSLocker locker(m_lock);
+    if (m_visitWeaklyHasBeenCalled)
+        return;
+    
+    m_visitWeaklyHasBeenCalled = true;
+
+    if (Heap::isMarkedConcurrently(this))
+        return;
+
+    if (shouldVisitStrongly(locker)) {
+        visitor.appendUnbarriered(this);
+        return;
+    }
+    
+    // There are two things that may use unconditional finalizers: inline cache clearing
+    // and jettisoning. The probability of us wanting to do at least one of those things
+    // is probably quite close to 1. So we add one no matter what and when it runs, it
+    // figures out whether it has any work to do.
+    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return;
+
+    // If we jettison ourselves we'll install our alternative, so make sure that it
+    // survives GC even if we don't.
+    visitor.append(m_alternative);
+    
+    // There are two things that we use weak reference harvesters for: DFG fixpoint for
+    // jettisoning, and trying to find structures that would be live based on some
+    // inline cache. So it makes sense to register them regardless.
+    visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
+
+#if ENABLE(DFG_JIT)
+    // We get here if we're live in the sense that our owner executable is live,
+    // but we're not yet live for sure in another sense: we may yet decide that this
+    // code block should be jettisoned based on its outgoing weak references being
+    // stale. Set a flag to indicate that we're still assuming that we're dead, and
+    // perform one round of determining if we're live. The GC may determine, based on
+    // either us marking additional objects, or by other objects being marked for
+    // other reasons, that this iteration should run again; it will notify us of this
+    // decision by calling harvestWeakReferences().
+
+    m_allTransitionsHaveBeenMarked = false;
+    propagateTransitions(locker, visitor);
+
+    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
+    determineLiveness(locker, visitor);
+#endif // ENABLE(DFG_JIT)
+}
+
+size_t CodeBlock::estimatedSize(JSCell* cell)
+{
+    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+    size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
+    if (thisObject->m_jitCode)
+        extraMemoryAllocated += thisObject->m_jitCode->size();
+    return Base::estimatedSize(cell) + extraMemoryAllocated;
+}
+
+void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    JSCell::visitChildren(thisObject, visitor);
+    thisObject->visitChildren(visitor);
+}
+
+void CodeBlock::visitChildren(SlotVisitor& visitor)
+{
+    ConcurrentJSLocker locker(m_lock);
+    // There are two things that may use unconditional finalizers: inline cache clearing
+    // and jettisoning. The probability of us wanting to do at least one of those things
+    // is probably quite close to 1. So we add one no matter what and when it runs, it
+    // figures out whether it has any work to do.
+    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
+
+    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
+        visitor.appendUnbarriered(otherBlock);
+
+    if (m_jitCode)
+        visitor.reportExtraMemoryVisited(m_jitCode->size());
+    if (m_instructions.size()) {
+        unsigned refCount = m_instructions.refCount();
+        RELEASE_ASSERT(refCount);
+        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
+    }
+
+    stronglyVisitStrongReferences(locker, visitor);
+    stronglyVisitWeakReferences(locker, visitor);
+
+    m_allTransitionsHaveBeenMarked = false;
+    propagateTransitions(locker, visitor);
+}
+
+bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
+{
+    if (Options::forceCodeBlockLiveness())
+        return true;
+
+    if (shouldJettisonDueToOldAge(locker))
+        return false;
+
+    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
+    // their weak references go stale. So if a baseline JIT CodeBlock gets
+    // scanned, we can assume that this means that it's live.
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return true;
+
+    return false;
+}
+
+bool CodeBlock::shouldJettisonDueToWeakReference()
+{
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return false;
+    return !Heap::isMarked(this);
+}
+
+static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
+{
+    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
+        switch (jitType) {
+        case JITCode::InterpreterThunk:
+            return std::chrono::milliseconds(10);
+        case JITCode::BaselineJIT:
+            return std::chrono::milliseconds(10 + 20);
+        case JITCode::DFGJIT:
+            return std::chrono::milliseconds(40);
+        case JITCode::FTLJIT:
+            return std::chrono::milliseconds(120);
+        default:
+            return std::chrono::milliseconds::max();
+        }
+    }
+
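+    // With the default timing below, an InterpreterThunk CodeBlock has a time-to-live of
+    // about 5 seconds, a Baseline one 15 seconds, a DFG one 20 seconds, and an FTL one
+    // 60 seconds; any other tier never expires due to age alone.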
+    switch (jitType) {
+    case JITCode::InterpreterThunk:
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
+    case JITCode::BaselineJIT:
+        // Effectively 10 additional seconds, since BaselineJIT and
+        // InterpreterThunk share a CodeBlock.
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
+    case JITCode::DFGJIT:
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
+    case JITCode::FTLJIT:
+        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
+    default:
+        return std::chrono::milliseconds::max();
+    }
+}
+
+bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
+{
+    if (Heap::isMarkedConcurrently(this))
+        return false;
+
+    if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
+        return true;
+    
+    if (timeSinceCreation() < timeToLive(jitType()))
+        return false;
+    
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
+{
+    if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
+        return false;
+    
+    if (!Heap::isMarkedConcurrently(transition.m_from.get()))
+        return false;
+    
+    return true;
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
+{
+    UNUSED_PARAM(visitor);
+
+    if (m_allTransitionsHaveBeenMarked)
+        return;
+
+    bool allAreMarkedSoFar = true;
+        
+    Interpreter* interpreter = m_vm->interpreter;
+    if (jitType() == JITCode::InterpreterThunk) {
+        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
+            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
+            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
+            case op_put_by_id: {
+                StructureID oldStructureID = instruction[4].u.structureID;
+                StructureID newStructureID = instruction[6].u.structureID;
+                if (!oldStructureID || !newStructureID)
+                    break;
+                Structure* oldStructure =
+                    m_vm->heap.structureIDTable().get(oldStructureID);
+                Structure* newStructure =
+                    m_vm->heap.structureIDTable().get(newStructureID);
+                if (Heap::isMarkedConcurrently(oldStructure))
+                    visitor.appendUnbarriered(newStructure);
+                else
+                    allAreMarkedSoFar = false;
+                break;
+            }
+            default:
+                break;
+            }
+        }
+    }
+
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType())) {
+        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
+            allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
+    }
+#endif // ENABLE(JIT)
+    
+#if ENABLE(DFG_JIT)
+    if (JITCode::isOptimizingJIT(jitType())) {
+        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+        for (auto& weakReference : dfgCommon->weakStructureReferences)
+            allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
+        
+        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+            if (shouldMarkTransition(dfgCommon->transitions[i])) {
+                // If the following three things are live, then the target of the
+                // transition is also live:
+                //
+                // - This code block. We know it's live already because otherwise
+                //   we wouldn't be scanning ourselves.
+                //
+                // - The code origin of the transition. Transitions may arise from
+                //   code that was inlined. They are not relevant if the user's
+                //   object that is required for the inlinee to run is no longer
+                //   live.
+                //
+                // - The source of the transition. The transition checks if some
+                //   heap location holds the source, and if so, stores the target.
+                //   Hence the source must be live for the transition to be live.
+                //
+                // We also short-circuit the liveness if the structure is harmless
+                // to mark (i.e. its global object and prototype are both already
+                // live).
+                
+                visitor.append(dfgCommon->transitions[i].m_to);
+            } else
+                allAreMarkedSoFar = false;
+        }
+    }
+#endif // ENABLE(DFG_JIT)
+    
+    if (allAreMarkedSoFar)
+        m_allTransitionsHaveBeenMarked = true;
+}
+
+void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
+{
+    UNUSED_PARAM(visitor);
+    
+#if ENABLE(DFG_JIT)
+    // Check if we have any remaining work to do.
+    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+    if (dfgCommon->livenessHasBeenProved)
+        return;
+    
+    // Now check all of our weak references. If all of them are live, then we
+    // have proved liveness and so we scan our strong references. If at end of
+    // GC we still have not proved liveness, then this code block is toast.
+    bool allAreLiveSoFar = true;
+    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+        ASSERT(!jsDynamicCast<CodeBlock*>(dfgCommon->weakReferences[i].get()));
+        if (!Heap::isMarkedConcurrently(dfgCommon->weakReferences[i].get())) {
+            allAreLiveSoFar = false;
+            break;
+        }
+    }
+    if (allAreLiveSoFar) {
+        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
+            if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
+                allAreLiveSoFar = false;
+                break;
+            }
+        }
+    }
+    
+    // If some weak references are dead, then this fixpoint iteration was
+    // unsuccessful.
+    if (!allAreLiveSoFar)
+        return;
+    
+    // All weak references are live. Record this information so we don't
+    // come back here again, and scan the strong references.
+    dfgCommon->livenessHasBeenProved = true;
+    visitor.appendUnbarriered(this);
+#endif // ENABLE(DFG_JIT)
+}
+
+void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
+{
+    CodeBlock* codeBlock =
+        bitwise_cast<CodeBlock*>(
+            bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
+    
+    codeBlock->propagateTransitions(NoLockingNecessary, visitor);
+    codeBlock->determineLiveness(NoLockingNecessary, visitor);
+}
+
+void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
+{
+    instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
+    instruction[4].u.pointer = nullptr;
+    instruction[5].u.pointer = nullptr;
+    instruction[6].u.pointer = nullptr;
+}
+
+void CodeBlock::finalizeLLIntInlineCaches()
+{
+    Interpreter* interpreter = m_vm->interpreter;
+    const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
+    for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+        Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
+        switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+        case op_get_by_id:
+        case op_get_by_id_proto_load:
+        case op_get_by_id_unset: {
+            StructureID oldStructureID = curInstruction[4].u.structureID;
+            if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
+                break;
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt property access.\n");
+            clearLLIntGetByIdCache(curInstruction);
+            break;
+        }
+        case op_put_by_id: {
+            StructureID oldStructureID = curInstruction[4].u.structureID;
+            StructureID newStructureID = curInstruction[6].u.structureID;
+            StructureChain* chain = curInstruction[7].u.structureChain.get();
+            if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
+                (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
+                (!chain || Heap::isMarked(chain)))
+                break;
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt put transition.\n");
+            curInstruction[4].u.structureID = 0;
+            curInstruction[5].u.operand = 0;
+            curInstruction[6].u.structureID = 0;
+            curInstruction[7].u.structureChain.clear();
+            break;
+        }
+        case op_get_array_length:
+            break;
+        case op_to_this:
+            if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
+                break;
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
+            curInstruction[2].u.structure.clear();
+            curInstruction[3].u.toThisStatus = merge(
+                curInstruction[3].u.toThisStatus, ToThisClearedByGC);
+            break;
+        case op_create_this: {
+            auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
+            if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
+                break;
+            JSCell* cachedFunction = cacheWriteBarrier.get();
+            if (Heap::isMarked(cachedFunction))
+                break;
+            if (Options::verboseOSR())
+                dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
+            cacheWriteBarrier.clear();
+            break;
+        }
+        case op_resolve_scope: {
+            // Right now this isn't strictly necessary. Any symbol tables that this will refer to
+            // are for outer functions, and we refer to those functions strongly, and they refer
+            // to the symbol table strongly. But it's nice to be on the safe side.
+            WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
+            if (!symbolTable || Heap::isMarked(symbolTable.get()))
+                break;
+            if (Options::verboseOSR())
+                dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
+            symbolTable.clear();
+            break;
+        }
+        case op_get_from_scope:
+        case op_put_to_scope: {
+            GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
+            if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
+                || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
+                continue;
+            WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
+            if (!structure || Heap::isMarked(structure.get()))
+                break;
+            if (Options::verboseOSR())
+                dataLogF("Clearing scope access with structure %p.\n", structure.get());
+            structure.clear();
+            break;
+        }
+        default:
+            OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
+            ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
+        }
+    }
+
+    // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
+    // then cleared the cache without GCing in between.
+    m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
+        return !Heap::isMarked(pair.key);
+    });
+
+    for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+        if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+            if (Options::verboseOSR())
+                dataLog("Clearing LLInt call from ", *this, "\n");
+            m_llintCallLinkInfos[i].unlink();
+        }
+        if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+            m_llintCallLinkInfos[i].lastSeenCallee.clear();
+    }
+}
+
+void CodeBlock::finalizeBaselineJITInlineCaches()
+{
+#if ENABLE(JIT)
+    for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
+        (*iter)->visitWeak(*vm());
+
+    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+        StructureStubInfo& stubInfo = **iter;
+        stubInfo.visitWeakReferences(this);
+    }
+#endif
+}
+
+void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
+{
+    CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
+        bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
+    
+    codeBlock->updateAllPredictions();
+    
+    if (!Heap::isMarked(codeBlock)) {
+        if (codeBlock->shouldJettisonDueToWeakReference())
+            codeBlock->jettison(Profiler::JettisonDueToWeakReference);
+        else
+            codeBlock->jettison(Profiler::JettisonDueToOldAge);
+        return;
+    }
+
+    if (JITCode::couldBeInterpreted(codeBlock->jitType()))
+        codeBlock->finalizeLLIntInlineCaches();
+
+#if ENABLE(JIT)
+    if (!!codeBlock->jitCode())
+        codeBlock->finalizeBaselineJITInlineCaches();
+#endif
+}
+
+void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
+{
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType()))
+        toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
+#else
+    UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getStubInfoMap(StubInfoMap& result)
+{
+    ConcurrentJSLocker locker(m_lock);
+    getStubInfoMap(locker, result);
+}
+
+void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
+{
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType()))
+        toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
+#else
+    UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
+{
+    ConcurrentJSLocker locker(m_lock);
+    getCallLinkInfoMap(locker, result);
+}
+
+void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
+{
+#if ENABLE(JIT)
+    if (JITCode::isJIT(jitType())) {
+        for (auto* byValInfo : m_byValInfos)
+            result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
+    }
+#else
+    UNUSED_PARAM(result);
+#endif
+}
+
+void CodeBlock::getByValInfoMap(ByValInfoMap& result)
+{
+    ConcurrentJSLocker locker(m_lock);
+    getByValInfoMap(locker, result);
+}
+
+#if ENABLE(JIT)
+StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return m_stubInfos.add(accessType);
+}
+
+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
+{
+    return m_addICs.add(arithProfile);
+}
+
+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
+{
+    return m_mulICs.add(arithProfile);
+}
+
+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
+{
+    return m_subICs.add(arithProfile);
+}
+
+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
+{
+    return m_negICs.add(arithProfile);
+}
+
+StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
+{
+    for (StructureStubInfo* stubInfo : m_stubInfos) {
+        if (stubInfo->codeOrigin == codeOrigin)
+            return stubInfo;
+    }
+    return nullptr;
+}
+
+ByValInfo* CodeBlock::addByValInfo()
+{
+    ConcurrentJSLocker locker(m_lock);
+    return m_byValInfos.add();
+}
+
+CallLinkInfo* CodeBlock::addCallLinkInfo()
+{
+    ConcurrentJSLocker locker(m_lock);
+    return m_callLinkInfos.add();
+}
+
+CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
+{
+    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
+        if ((*iter)->codeOrigin() == CodeOrigin(index))
+            return *iter;
+    }
+    return nullptr;
+}
+
+void CodeBlock::resetJITData()
+{
+    RELEASE_ASSERT(!JITCode::isJIT(jitType()));
+    ConcurrentJSLocker locker(m_lock);
+    
+    // We can clear these because no other thread will have references to any stub infos, call
+    // link infos, or by val infos if we don't have JIT code. Attempts to query these data
+    // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
+    // don't have JIT code.
+    m_stubInfos.clear();
+    m_callLinkInfos.clear();
+    m_byValInfos.clear();
+    
+    // We can clear this because the DFG's queries to these data structures are guarded by whether
+    // there is JIT code.
+    m_rareCaseProfiles.clear();
+}
+#endif
+
+void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
+{
+    // We strongly visit OSR exits targets because we don't want to deal with
+    // the complexity of generating an exit target CodeBlock on demand and
+    // guaranteeing that it matches the details of the CodeBlock we compiled
+    // the OSR exit against.
+
+    visitor.append(m_alternative);
+
+#if ENABLE(DFG_JIT)
+    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+    if (dfgCommon->inlineCallFrames) {
+        for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
+            ASSERT(inlineCallFrame->baselineCodeBlock);
+            visitor.append(inlineCallFrame->baselineCodeBlock);
+        }
+    }
+#endif
+}
+
+void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
+{
+    UNUSED_PARAM(locker);
+    
+    visitor.append(m_globalObject);
+    visitor.append(m_ownerExecutable);
+    visitor.append(m_unlinkedCode);
+    if (m_rareData)
+        m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
+    visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
+    for (size_t i = 0; i < m_functionExprs.size(); ++i)
+        visitor.append(m_functionExprs[i]);
+    for (size_t i = 0; i < m_functionDecls.size(); ++i)
+        visitor.append(m_functionDecls[i]);
+    for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
+        m_objectAllocationProfiles[i].visitAggregate(visitor);
+
+#if ENABLE(JIT)
+    for (ByValInfo* byValInfo : m_byValInfos)
+        visitor.append(byValInfo->cachedSymbol);
+#endif
+
+#if ENABLE(DFG_JIT)
+    if (JITCode::isOptimizingJIT(jitType()))
+        visitOSRExitTargets(locker, visitor);
+#endif
+}
+
+void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
+{
+    UNUSED_PARAM(visitor);
+
+#if ENABLE(DFG_JIT)
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return;
+    
+    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+
+    for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+        if (!!dfgCommon->transitions[i].m_codeOrigin)
+            visitor.append(dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+        visitor.append(dfgCommon->transitions[i].m_from);
+        visitor.append(dfgCommon->transitions[i].m_to);
+    }
+    
+    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
+        visitor.append(dfgCommon->weakReferences[i]);
+
+    for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
+        visitor.append(dfgCommon->weakStructureReferences[i]);
+
+    dfgCommon->livenessHasBeenProved = true;
+#endif    
+}
+
+CodeBlock* CodeBlock::baselineAlternative()
+{
+#if ENABLE(JIT)
+    CodeBlock* result = this;
+    while (result->alternative())
+        result = result->alternative();
+    RELEASE_ASSERT(result);
+    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
+    return result;
+#else
+    return this;
+#endif
+}
+
+CodeBlock* CodeBlock::baselineVersion()
+{
+#if ENABLE(JIT)
+    if (JITCode::isBaselineCode(jitType()))
+        return this;
+    CodeBlock* result = replacement();
+    if (!result) {
+        // This can happen if we're creating the original CodeBlock for an executable.
+        // Assume that we're the baseline CodeBlock.
+        RELEASE_ASSERT(jitType() == JITCode::None);
+        return this;
+    }
+    result = result->baselineAlternative();
+    return result;
+#else
+    return this;
+#endif
+}
+
+#if ENABLE(JIT)
+bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
+{
+    return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
+}
+
+bool CodeBlock::hasOptimizedReplacement()
+{
+    return hasOptimizedReplacement(jitType());
+}
+#endif
+
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
+{
+    RELEASE_ASSERT(bytecodeOffset < instructions().size());
+    return handlerForIndex(bytecodeOffset, requiredHandler);
+}
+
+HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
+{
+    if (!m_rareData)
+        return 0;
+    return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
+}
+
+CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
+{
+#if ENABLE(DFG_JIT)
+    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
+    RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
+    ASSERT(!!handlerForIndex(originalCallSite.bits()));
+    CodeOrigin originalOrigin = codeOrigin(originalCallSite);
+    return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
+#else
+    // We never create new on-the-fly exception handling
+    // call sites outside the DFG/FTL inline caches.
+    UNUSED_PARAM(originalCallSite);
+    RELEASE_ASSERT_NOT_REACHED();
+    return CallSiteIndex(0u);
+#endif
+}
+
+void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
+{
+    RELEASE_ASSERT(m_rareData);
+    Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+    unsigned index = callSiteIndex.bits();
+    for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
+        HandlerInfo& handler = exceptionHandlers[i];
+        if (handler.start <= index && handler.end > index) {
+            exceptionHandlers.remove(i);
+            return;
+        }
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+    RELEASE_ASSERT(bytecodeOffset < instructions().size());
+    return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
+}
+
+unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+    int divot;
+    int startOffset;
+    int endOffset;
+    unsigned line;
+    unsigned column;
+    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+    return column;
+}
+
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
+{
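+    // The unlinked code reports the divot, line, and column relative to the executable's
+    // own source, so each of them is rebased below against where this code block's source
+    // actually begins.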
+    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+    divot += m_sourceOffset;
+    column += line ? 1 : firstLineColumnOffset();
+    line += ownerScriptExecutable()->firstLine();
+}
+
+bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
+{
+    Interpreter* interpreter = vm()->interpreter;
+    const Instruction* begin = instructions().begin();
+    const Instruction* end = instructions().end();
+    for (const Instruction* it = begin; it != end;) {
+        OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
+        if (opcodeID == op_debug) {
+            unsigned bytecodeOffset = it - begin;
+            int unused;
+            unsigned opDebugLine;
+            unsigned opDebugColumn;
+            expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
+            if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
+                return true;
+        }
+        it += opcodeLengths[opcodeID];
+    }
+    return false;
+}
+
+void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
+{
+    ConcurrentJSLocker locker(m_lock);
+
+    m_rareCaseProfiles.shrinkToFit();
+    
+    if (shrinkMode == EarlyShrink) {
+        m_constantRegisters.shrinkToFit();
+        m_constantsSourceCodeRepresentation.shrinkToFit();
+        
+        if (m_rareData) {
+            m_rareData->m_switchJumpTables.shrinkToFit();
+            m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+        }
+    } // else don't shrink these, because we would have already pointed pointers into these tables.
+}
+
+#if ENABLE(JIT)
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
+{
+    noticeIncomingCall(callerFrame);
+    m_incomingCalls.push(incoming);
+}
+
+void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
+{
+    noticeIncomingCall(callerFrame);
+    m_incomingPolymorphicCalls.push(incoming);
+}
+#endif // ENABLE(JIT)
+
+void CodeBlock::unlinkIncomingCalls()
+{
+    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
+        m_incomingLLIntCalls.begin()->unlink();
+#if ENABLE(JIT)
+    while (m_incomingCalls.begin() != m_incomingCalls.end())
+        m_incomingCalls.begin()->unlink(*vm());
+    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
+        m_incomingPolymorphicCalls.begin()->unlink(*vm());
+#endif // ENABLE(JIT)
+}
+
+void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
+{
+    noticeIncomingCall(callerFrame);
+    m_incomingLLIntCalls.push(incoming);
+}
+
+CodeBlock* CodeBlock::newReplacement()
+{
+    return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
+}
+
+#if ENABLE(JIT)
+CodeBlock* CodeBlock::replacement()
+{
+    const ClassInfo* classInfo = this->classInfo();
+
+    if (classInfo == FunctionCodeBlock::info())
+        return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+
+    if (classInfo == EvalCodeBlock::info())
+        return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
+
+    if (classInfo == ProgramCodeBlock::info())
+        return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
+
+    if (classInfo == ModuleProgramCodeBlock::info())
+        return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return nullptr;
+}
+
+DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
+{
+    const ClassInfo* classInfo = this->classInfo();
+
+    if (classInfo == FunctionCodeBlock::info()) {
+        if (m_isConstructor)
+            return DFG::functionForConstructCapabilityLevel(this);
+        return DFG::functionForCallCapabilityLevel(this);
+    }
+
+    if (classInfo == EvalCodeBlock::info())
+        return DFG::evalCapabilityLevel(this);
+
+    if (classInfo == ProgramCodeBlock::info())
+        return DFG::programCapabilityLevel(this);
+
+    if (classInfo == ModuleProgramCodeBlock::info())
+        return DFG::programCapabilityLevel(this);
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return DFG::CannotCompile;
+}
+
+#endif // ENABLE(JIT)
+
+void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
+{
+#if !ENABLE(DFG_JIT)
+    UNUSED_PARAM(mode);
+    UNUSED_PARAM(detail);
+#endif
+    
+    CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
+
+    RELEASE_ASSERT(reason != Profiler::NotJettisoned);
+    
+#if ENABLE(DFG_JIT)
+    if (DFG::shouldDumpDisassembly()) {
+        dataLog("Jettisoning ", *this);
+        if (mode == CountReoptimization)
+            dataLog(" and counting reoptimization");
+        dataLog(" due to ", reason);
+        if (detail)
+            dataLog(", ", *detail);
+        dataLog(".\n");
+    }
+    
+    if (reason == Profiler::JettisonDueToWeakReference) {
+        if (DFG::shouldDumpDisassembly()) {
+            dataLog(*this, " will be jettisoned because of the following dead references:\n");
+            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
+            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
+                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
+                JSCell* origin = transition.m_codeOrigin.get();
+                JSCell* from = transition.m_from.get();
+                JSCell* to = transition.m_to.get();
+                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
+                    continue;
+                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
+            }
+            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
+                JSCell* weak = dfgCommon->weakReferences[i].get();
+                if (Heap::isMarked(weak))
+                    continue;
+                dataLog("    Weak reference ", RawPointer(weak), ".\n");
+            }
+        }
+    }
+#endif // ENABLE(DFG_JIT)
+
+    DeferGCForAWhile deferGC(*heap());
+    
+    // We want to accomplish two things here:
+    // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
+    //    we should OSR exit at the top of the next bytecode instruction after the return.
+    // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
+
+#if ENABLE(DFG_JIT)
+    if (reason != Profiler::JettisonDueToOldAge) {
+        if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
+            compilation->setJettisonReason(reason, detail);
+        
+        // This accomplishes (1), and does its own book-keeping about whether it has already happened.
+        if (!jitCode()->dfgCommon()->invalidate()) {
+            // We've already been invalidated.
+            RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
+            return;
+        }
+    }
+    
+    if (DFG::shouldDumpDisassembly())
+        dataLog("    Did invalidate ", *this, "\n");
+    
+    // Count the reoptimization if that's what the user wanted.
+    if (mode == CountReoptimization) {
+        // FIXME: Maybe this should call alternative().
+        // https://bugs.webkit.org/show_bug.cgi?id=123677
+        baselineAlternative()->countReoptimization();
+        if (DFG::shouldDumpDisassembly())
+            dataLog("    Did count reoptimization for ", *this, "\n");
+    }
+    
+    if (this != replacement()) {
+        // This means that we were never the entrypoint. This can happen for OSR entry code
+        // blocks.
+        return;
+    }
+
+    if (alternative())
+        alternative()->optimizeAfterWarmUp();
+
+    if (reason != Profiler::JettisonDueToOldAge)
+        tallyFrequentExitSites();
+#endif // ENABLE(DFG_JIT)
+
+    // Jettison can happen during GC. We don't want to install code to a dead executable
+    // because that would add a dead object to the remembered set.
+    if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
+        return;
+
+    // This accomplishes (2).
+    ownerScriptExecutable()->installCode(
+        m_globalObject->vm(), alternative(), codeType(), specializationKind());
+
+#if ENABLE(DFG_JIT)
+    if (DFG::shouldDumpDisassembly())
+        dataLog("    Did install baseline version of ", *this, "\n");
+#endif // ENABLE(DFG_JIT)
+}
+
+JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
+{
+    if (!codeOrigin.inlineCallFrame)
+        return globalObject();
+    return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
+}
+
+class RecursionCheckFunctor {
+public:
+    RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
+        : m_startCallFrame(startCallFrame)
+        , m_codeBlock(codeBlock)
+        , m_depthToCheck(depthToCheck)
+        , m_foundStartCallFrame(false)
+        , m_didRecurse(false)
+    { }
+
+    StackVisitor::Status operator()(StackVisitor& visitor) const
+    {
+        CallFrame* currentCallFrame = visitor->callFrame();
+
+        if (currentCallFrame == m_startCallFrame)
+            m_foundStartCallFrame = true;
+
+        if (m_foundStartCallFrame) {
+            if (visitor->callFrame()->codeBlock() == m_codeBlock) {
+                m_didRecurse = true;
+                return StackVisitor::Done;
+            }
+
+            if (!m_depthToCheck--)
+                return StackVisitor::Done;
+        }
+
+        return StackVisitor::Continue;
+    }
+
+    bool didRecurse() const { return m_didRecurse; }
+
+private:
+    CallFrame* m_startCallFrame;
+    CodeBlock* m_codeBlock;
+    mutable unsigned m_depthToCheck;
+    mutable bool m_foundStartCallFrame;
+    mutable bool m_didRecurse;
+};
+
+void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
+{
+    CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+    
+    if (Options::verboseCallLink())
+        dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
+    
+#if ENABLE(DFG_JIT)
+    if (!m_shouldAlwaysBeInlined)
+        return;
+    
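+    // In the log messages below, "SABI" is shorthand for the m_shouldAlwaysBeInlined bit
+    // that this function may clear.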
+    if (!callerCodeBlock) {
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller is native.\n");
+        return;
+    }
+
+    if (!hasBaselineJITProfiling())
+        return;
+
+    if (!DFG::mightInlineFunction(this))
+        return;
+
+    if (!canInline(capabilityLevelState()))
+        return;
+    
+    if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller is too large.\n");
+        return;
+    }
+
+    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
+        // If the caller is still in the interpreter, then we can't expect inlining to
+        // happen anytime soon. Assume it's profitable to optimize it separately. This
+        // ensures that a function is SABI only if it is called no more frequently than
+        // any of its callers.
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller is in LLInt.\n");
+        return;
+    }
+    
+    if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller was already optimized.\n");
+        return;
+    }
+    
+    if (callerCodeBlock->codeType() != FunctionCode) {
+        // If the caller is either eval or global code, assume that it won't be
+        // optimized anytime soon. For eval code this is particularly true since we
+        // delay eval optimization by a *lot*.
+        m_shouldAlwaysBeInlined = false;
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because caller is not a function.\n");
+        return;
+    }
+
+    // Recursive calls won't be inlined.
+    RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
+    vm()->topCallFrame->iterate(functor);
+
+    if (functor.didRecurse()) {
+        if (Options::verboseCallLink())
+            dataLog("    Clearing SABI because recursion was detected.\n");
+        m_shouldAlwaysBeInlined = false;
+        return;
+    }
+    
+    if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
+        dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
+        CRASH();
+    }
+    
+    if (canCompile(callerCodeBlock->capabilityLevelState()))
+        return;
+    
+    if (Options::verboseCallLink())
+        dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
+    
+    m_shouldAlwaysBeInlined = false;
+#endif
+}
+
+unsigned CodeBlock::reoptimizationRetryCounter() const
+{
+#if ENABLE(JIT)
+    ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
+    return m_reoptimizationRetryCounter;
+#else
+    return 0;
+#endif // ENABLE(JIT)
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
+{
+    m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
+}
+
+void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
+{
+    m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
+}
+    
+static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
+{
+    static const unsigned cpuRegisterSize = sizeof(void*);
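+    // For illustration, assuming sizeof(Register) is 8: on a 32-bit target, 5 callee-save
+    // registers occupy 5 * 4 = 20 bytes, which rounds up to 24 bytes, i.e. 3 virtual
+    // registers; on a 64-bit target the same 5 registers map to exactly 5 virtual registers.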
+    return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
+
+}
+
+size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
+{
+    return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
+}
+
+size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
+{
+    return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
+}
+
+void CodeBlock::countReoptimization()
+{
+    m_reoptimizationRetryCounter++;
+    if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
+        m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
+}
+
+unsigned CodeBlock::numberOfDFGCompiles()
+{
+    ASSERT(JITCode::isBaselineCode(jitType()));
+    if (Options::testTheFTL()) {
+        if (m_didFailFTLCompilation)
+            return 1000000;
+        return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
+    }
+    return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
+}
+
+int32_t CodeBlock::codeTypeThresholdMultiplier() const
+{
+    if (codeType() == EvalCode)
+        return Options::evalThresholdMultiplier();
+    
+    return 1;
+}
+
+double CodeBlock::optimizationThresholdScalingFactor()
+{
+    // This expression arises from doing a least-squares fit of
+    //
+    // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
+    //
+    // against the data points:
+    //
+    //    x       F[x_]
+    //    10       0.9          (smallest reasonable code block)
+    //   200       1.0          (typical small-ish code block)
+    //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
+    //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
+    //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
+    // 10000       6.0          (similar to above)
+    //
+    // I achieve the minimization using the following Mathematica code:
+    //
+    // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
+    //
+    // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
+    //
+    // solution = 
+    //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
+    //         {a, b, c, d}][[2]]
+    //
+    // And the code below (to initialize a, b, c, d) is generated by:
+    //
+    // Print["const double " <> ToString[#[[1]]] <> " = " <>
+    //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
+    //
+    // We've long known the following to be true:
+    // - Small code blocks are cheap to optimize and so we should do it sooner rather
+    //   than later.
+    // - Large code blocks are expensive to optimize and so we should postpone doing so,
+    //   and sometimes have a large enough threshold that we never optimize them.
+    // - The difference in cost is not totally linear because (a) just invoking the
+    //   DFG incurs some base cost and (b) for large code blocks there is enough slop
+    //   in the correlation between instruction count and the actual compilation cost
+    //   that for those large blocks, the instruction count should not have a strong
+    //   influence on our threshold.
+    //
+    // I knew the goals but I didn't know how to achieve them; so I picked an interesting
+    // example where the heuristics were right (code block in 3d-cube with instruction
+    // count 320, which got compiled early as it should have been) and one where they were
+    // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
+    // to compile and didn't run often enough to warrant compilation in my opinion), and
+    // then threw in additional data points that represented my own guess of what our
+    // heuristics should do for some round-numbered examples.
+    //
+    // The expression to which I decided to fit the data arose because I started with an
+    // affine function, and then did two things: put the linear part in an Abs to ensure
+    // that the fit didn't end up choosing a negative value of c (which would result in
+    // the function turning over and going negative for large x) and I threw in a Sqrt
+    // term because Sqrt represents my intuition that the function should be more sensitive
+    // to small changes in small values of x, but less sensitive when x gets large.
+    
+    // Note that the current fit essentially eliminates the linear portion of the
+    // expression (c == 0.0).
+    const double a = 0.061504;
+    const double b = 1.02406;
+    const double c = 0.0;
+    const double d = 0.825914;
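+    // As a worked example, an instruction count of 320 gives roughly
+    // 0.061504 * sqrt(320 + 1.02406) + 0.825914, which is about 1.93, before the
+    // code-type multiplier below is applied.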
+    
+    double instructionCount = this->instructionCount();
+    
+    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
+    
+    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
+    
+    result *= codeTypeThresholdMultiplier();
+    
+    if (Options::verboseOSR()) {
+        dataLog(
+            *this, ": instruction count is ", instructionCount,
+            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
+            "\n");
+    }
+    return result;
+}
+
+static int32_t clipThreshold(double threshold)
+{
+    if (threshold < 1.0)
+        return 1;
+    
+    if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
+        return std::numeric_limits<int32_t>::max();
+    
+    return static_cast<int32_t>(threshold);
+}
+
+int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
+{
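+    // For example, a desired threshold of 1000 combined with a scaling factor of 2.0 and
+    // one prior reoptimization (retry counter of 1) yields clipThreshold(1000 * 2.0 * 2),
+    // i.e. 4000.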
+    return clipThreshold(
+        static_cast<double>(desiredThreshold) *
+        optimizationThresholdScalingFactor() *
+        (1 << reoptimizationRetryCounter()));
+}
+
+bool CodeBlock::checkIfOptimizationThresholdReached()
+{
+#if ENABLE(DFG_JIT)
+    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
+        if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
+            == DFG::Worklist::Compiled) {
+            optimizeNextInvocation();
+            return true;
+        }
+    }
+#endif
+    
+    return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
+}
+
+void CodeBlock::optimizeNextInvocation()
+{
+    if (Options::verboseOSR())
+        dataLog(*this, ": Optimizing next invocation.\n");
+    m_jitExecuteCounter.setNewThreshold(0, this);
+}
+
+void CodeBlock::dontOptimizeAnytimeSoon()
+{
+    if (Options::verboseOSR())
+        dataLog(*this, ": Not optimizing anytime soon.\n");
+    m_jitExecuteCounter.deferIndefinitely();
+}
+
+void CodeBlock::optimizeAfterWarmUp()
+{
+    if (Options::verboseOSR())
+        dataLog(*this, ": Optimizing after warm-up.\n");
+#if ENABLE(DFG_JIT)
+    m_jitExecuteCounter.setNewThreshold(
+        adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
+#endif
+}
+
+void CodeBlock::optimizeAfterLongWarmUp()
+{
+    if (Options::verboseOSR())
+        dataLog(*this, ": Optimizing after long warm-up.\n");
+#if ENABLE(DFG_JIT)
+    m_jitExecuteCounter.setNewThreshold(
+        adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
+#endif
+}
+
+void CodeBlock::optimizeSoon()
+{
+    if (Options::verboseOSR())
+        dataLog(*this, ": Optimizing soon.\n");
+#if ENABLE(DFG_JIT)
+    m_jitExecuteCounter.setNewThreshold(
+        adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
+#endif
+}
+
+void CodeBlock::forceOptimizationSlowPathConcurrently()
+{
+    if (Options::verboseOSR())
+        dataLog(*this, ": Forcing slow path concurrently.\n");
+    m_jitExecuteCounter.forceSlowPathConcurrently();
+}
+
+#if ENABLE(DFG_JIT)
+void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
+{
+    JITCode::JITType type = jitType();
+    if (type != JITCode::BaselineJIT) {
+        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    CodeBlock* theReplacement = replacement();
+    if ((result == CompilationSuccessful) != (theReplacement != this)) {
+        dataLog(*this, ": we have result = ", result, " but ");
+        if (theReplacement == this)
+            dataLog("we are our own replacement.\n");
+        else
+            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    switch (result) {
+    case CompilationSuccessful:
+        RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
+        optimizeNextInvocation();
+        return;
+    case CompilationFailed:
+        dontOptimizeAnytimeSoon();
+        return;
+    case CompilationDeferred:
+        // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
+        // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
+        // necessarily guarantee anything. So, we make sure that even if that
+        // function ends up being a no-op, we still eventually retry and realize
+        // that we have optimized code ready.
+        optimizeAfterWarmUp();
+        return;
+    case CompilationInvalidated:
+        // Retry with exponential backoff.
+        countReoptimization();
+        optimizeAfterWarmUp();
+        return;
+    }
+    
+    dataLog("Unrecognized result: ", static_cast(result), "\n");
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+#endif
+    
+uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
+{
+    ASSERT(JITCode::isOptimizingJIT(jitType()));
+    // Compute this the lame way so we don't saturate. This is called infrequently
+    // enough that this loop won't hurt us.
+    unsigned result = desiredThreshold;
+    for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
+        unsigned newResult = result << 1;
+        if (newResult < result)
+            return std::numeric_limits<uint32_t>::max();
+        result = newResult;
+    }
+    return result;
+}
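+// Worked example (illustrative): a desired threshold of 100 with a baseline
+// reoptimization retry counter of 3 doubles three times to 800; if a doubling
+// ever wraps around, the threshold saturates at the maximum uint32_t value.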
+
+uint32_t CodeBlock::exitCountThresholdForReoptimization()
+{
+    return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
+}
+
+uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
+{
+    return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
+}
+
+bool CodeBlock::shouldReoptimizeNow()
+{
+    return osrExitCounter() >= exitCountThresholdForReoptimization();
+}
+
+bool CodeBlock::shouldReoptimizeFromLoopNow()
+{
+    return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
+}
+#endif
+
+ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
+{
+    for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
+        if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
+            return &m_arrayProfiles[i];
+    }
+    return 0;
+}
+
+ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return getArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
+{
+    m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
+    return &m_arrayProfiles.last();
+}
+
+ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return addArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
+{
+    ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
+    if (result)
+        return result;
+    return addArrayProfile(locker, bytecodeOffset);
+}
+
+ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return getOrAddArrayProfile(locker, bytecodeOffset);
+}
+
+#if ENABLE(DFG_JIT)
+Vector<CodeOrigin>& CodeBlock::codeOrigins()
+{
+    return m_jitCode->dfgCommon()->codeOrigins;
+}
+
+size_t CodeBlock::numberOfDFGIdentifiers() const
+{
+    if (!JITCode::isOptimizingJIT(jitType()))
+        return 0;
+    
+    return m_jitCode->dfgCommon()->dfgIdentifiers.size();
+}
+
+const Identifier& CodeBlock::identifier(int index) const
+{
+    size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
+    if (static_cast<size_t>(index) < unlinkedIdentifiers)
+        return m_unlinkedCode->identifier(index);
+    ASSERT(JITCode::isOptimizingJIT(jitType()));
+    return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
+}
+#endif // ENABLE(DFG_JIT)
+
+void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
+{
+    ConcurrentJSLocker locker(m_lock);
+    
+    numberOfLiveNonArgumentValueProfiles = 0;
+    numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
+    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+        ValueProfile* profile = getFromAllValueProfiles(i);
+        unsigned numSamples = profile->totalNumberOfSamples();
+        if (numSamples > ValueProfile::numberOfBuckets)
+            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
+        numberOfSamplesInProfiles += numSamples;
+        if (profile->m_bytecodeOffset < 0) {
+            profile->computeUpdatedPrediction(locker);
+            continue;
+        }
+        if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
+            numberOfLiveNonArgumentValueProfiles++;
+        profile->computeUpdatedPrediction(locker);
+    }
+    
+#if ENABLE(DFG_JIT)
+    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
+#endif
+}
+
+void CodeBlock::updateAllValueProfilePredictions()
+{
+    unsigned ignoredValue1, ignoredValue2;
+    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
+}
+
+void CodeBlock::updateAllArrayPredictions()
+{
+    ConcurrentJSLocker locker(m_lock);
+    
+    for (unsigned i = m_arrayProfiles.size(); i--;)
+        m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
+    
+    // Don't count these either, for similar reasons.
+    for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
+        m_arrayAllocationProfiles[i].updateIndexingType();
+}
+
+void CodeBlock::updateAllPredictions()
+{
+    updateAllValueProfilePredictions();
+    updateAllArrayPredictions();
+}
+
+bool CodeBlock::shouldOptimizeNow()
+{
+    if (Options::verboseOSR())
+        dataLog("Considering optimizing ", *this, "...\n");
+
+    if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
+        return true;
+    
+    updateAllArrayPredictions();
+    
+    unsigned numberOfLiveNonArgumentValueProfiles;
+    unsigned numberOfSamplesInProfiles;
+    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
+
+    if (Options::verboseOSR()) {
+        dataLogF(
+            "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
+            (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
+            numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
+            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
+            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
+    }
+
+    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
+        && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
+        && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
+        return true;
+    
+    ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
+    m_optimizationDelayCounter++;
+    optimizeAfterWarmUp();
+    return false;
+}
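+// Illustrative reading of the heuristic above (the rates come from Options;
+// the numbers here are made up): if the required liveness rate were 0.75, a
+// block with 40 value profiles would need at least 30 of them to have observed
+// samples or predictions before we agree to optimize; otherwise we bump
+// m_optimizationDelayCounter and schedule another warm-up cycle.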
+
+#if ENABLE(DFG_JIT)
+void CodeBlock::tallyFrequentExitSites()
+{
+    ASSERT(JITCode::isOptimizingJIT(jitType()));
+    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
+    
+    CodeBlock* profiledBlock = alternative();
+    
+    switch (jitType()) {
+    case JITCode::DFGJIT: {
+        DFG::JITCode* jitCode = m_jitCode->dfg();
+        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
+            DFG::OSRExit& exit = jitCode->osrExit[i];
+            exit.considerAddingAsFrequentExitSite(profiledBlock);
+        }
+        break;
+    }
+
+#if ENABLE(FTL_JIT)
+    case JITCode::FTLJIT: {
+        // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
+        // vector contains a totally different type, that just so happens to behave like
+        // DFG::JITCode::osrExit.
+        FTL::JITCode* jitCode = m_jitCode->ftl();
+        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
+            FTL::OSRExit& exit = jitCode->osrExit[i];
+            exit.considerAddingAsFrequentExitSite(profiledBlock);
+        }
+        break;
+    }
+#endif
+        
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+}
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+void CodeBlock::dumpValueProfiles()
+{
+    dataLog("ValueProfile for ", *this, ":\n");
+    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+        ValueProfile* profile = getFromAllValueProfiles(i);
+        if (profile->m_bytecodeOffset < 0) {
+            ASSERT(profile->m_bytecodeOffset == -1);
+            dataLogF("   arg = %u: ", i);
+        } else
+            dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
+        if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
+            dataLogF("\n");
+            continue;
+        }
+        profile->dump(WTF::dataFile());
+        dataLogF("\n");
+    }
+    dataLog("RareCaseProfile for ", *this, ":\n");
+    for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
+        RareCaseProfile* profile = rareCaseProfile(i);
+        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+    }
+}
+#endif // ENABLE(VERBOSE_VALUE_PROFILE)
+
+unsigned CodeBlock::frameRegisterCount()
+{
+    switch (jitType()) {
+    case JITCode::InterpreterThunk:
+        return LLInt::frameRegisterCountFor(this);
+
+#if ENABLE(JIT)
+    case JITCode::BaselineJIT:
+        return JIT::frameRegisterCountFor(this);
+#endif // ENABLE(JIT)
+
+#if ENABLE(DFG_JIT)
+    case JITCode::DFGJIT:
+    case JITCode::FTLJIT:
+        return jitCode()->dfgCommon()->frameRegisterCount;
+#endif // ENABLE(DFG_JIT)
+        
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return 0;
+    }
+}
+
+int CodeBlock::stackPointerOffset()
+{
+    return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
+}
+
+size_t CodeBlock::predictedMachineCodeSize()
+{
+    // This will be called from CodeBlock::CodeBlock before either m_vm or the
+    // instructions have been initialized. It's OK to return 0 because what will really
+    // matter is the recomputation of this value when the slow path is triggered.
+    if (!m_vm)
+        return 0;
+    
+    if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
+        return 0; // It's as good of a prediction as we'll get.
+    
+    // Be conservative: return a size that will be an overestimation 84% of the time.
+    double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
+        m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
+    
+    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
+    // here is OK, since this whole method is just a heuristic.
+    if (multiplier < 0 || multiplier > 1000)
+        return 0;
+    
+    double doubleResult = multiplier * m_instructions.size();
+    
+    // Be even more paranoid: silently reject values that won't fit into a size_t. If
+    // the function is so huge that we can't even fit it into virtual memory then we
+    // should probably have some other guards in place to prevent us from even getting
+    // to this point.
+    if (doubleResult > std::numeric_limits<size_t>::max())
+        return 0;
+    
+    return static_cast<size_t>(doubleResult);
+}
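+// Note on the 84% figure above: mean + one standard deviation is roughly the
+// 84th percentile of a normal distribution, so assuming machine-code bytes per
+// bytecode word are approximately normally distributed, this prediction
+// overestimates about 84% of the time.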
+
+bool CodeBlock::usesOpcode(OpcodeID opcodeID)
+{
+    Interpreter* interpreter = vm()->interpreter;
+    Instruction* instructionsBegin = instructions().begin();
+    unsigned instructionCount = instructions().size();
+    
+    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
+        switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
+#define DEFINE_OP(curOpcode, length)        \
+        case curOpcode:                     \
+            if (curOpcode == opcodeID)      \
+                return true;                \
+            bytecodeOffset += length;       \
+            break;
+            FOR_EACH_OPCODE_ID(DEFINE_OP)
+#undef DEFINE_OP
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+    }
+    
+    return false;
+}
+
+String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
+{
+    for (unsigned i = 0; i < m_constantRegisters.size(); i++) {
+        if (m_constantRegisters[i].get().isEmpty())
+            continue;
+        if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) {
+            ConcurrentJSLocker locker(symbolTable->m_lock);
+            auto end = symbolTable->end(locker);
+            for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
+                if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
+                    // FIXME: This won't work from the compilation thread.
+                    // https://bugs.webkit.org/show_bug.cgi?id=115300
+                    return ptr->key.get();
+                }
+            }
+        }
+    }
+    if (virtualRegister == thisRegister())
+        return ASCIILiteral("this");
+    if (virtualRegister.isArgument())
+        return String::format("arguments[%3d]", virtualRegister.toArgument());
+
+    return "";
+}
+
+ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
+{
+    OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset].u.opcode);
+    unsigned length = opcodeLength(opcodeID);
+    return instructions()[bytecodeOffset + length - 1].u.profile;
+}
+
+void CodeBlock::validate()
+{
+    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
+    
+    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
+    
+    if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
+        beginValidationDidFail();
+        dataLog("    Wrong number of bits in result!\n");
+        dataLog("    Result: ", liveAtHead, "\n");
+        dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
+        endValidationDidFail();
+    }
+    
+    for (unsigned i = m_numCalleeLocals; i--;) {
+        VirtualRegister reg = virtualRegisterForLocal(i);
+        
+        if (liveAtHead[i]) {
+            beginValidationDidFail();
+            dataLog("    Variable ", reg, " is expected to be dead.\n");
+            dataLog("    Result: ", liveAtHead, "\n");
+            endValidationDidFail();
+        }
+    }
+}
+
+void CodeBlock::beginValidationDidFail()
+{
+    dataLog("Validation failure in ", *this, ":\n");
+    dataLog("\n");
+}
+
+void CodeBlock::endValidationDidFail()
+{
+    dataLog("\n");
+    dumpBytecode();
+    dataLog("\n");
+    dataLog("Validation failure.\n");
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void CodeBlock::addBreakpoint(unsigned numBreakpoints)
+{
+    m_numBreakpoints += numBreakpoints;
+    ASSERT(m_numBreakpoints);
+    if (JITCode::isOptimizingJIT(jitType()))
+        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
+}
+
+void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
+{
+    m_steppingMode = mode;
+    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
+        jettison(Profiler::JettisonDueToDebuggerStepping);
+}
+
+RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+{
+    m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+    return &m_rareCaseProfiles.last();
+}
+
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+{
+    return tryBinarySearch<RareCaseProfile, int>(
+        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
+        getRareCaseProfileBytecodeOffset);
+}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
+{
+    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
+    if (profile)
+        return profile->m_counter;
+    return 0;
+}
+
+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
+{
+    return arithProfileForPC(instructions().begin() + bytecodeOffset);
+}
+
+ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
+{
+    auto opcodeID = vm()->interpreter->getOpcodeID(pc[0].u.opcode);
+    switch (opcodeID) {
+    case op_negate:
+        return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
+    case op_bitor:
+    case op_bitand:
+    case op_bitxor:
+    case op_add:
+    case op_mul:
+    case op_sub:
+    case op_div:
+        return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
+    default:
+        break;
+    }
+
+    return nullptr;
+}
+
+bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
+{
+    if (!hasBaselineJITProfiling())
+        return false;
+    ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
+    if (!profile)
+        return false;
+    return profile->tookSpecialFastPath();
+}
+
+#if ENABLE(JIT)
+DFG::CapabilityLevel CodeBlock::capabilityLevel()
+{
+    DFG::CapabilityLevel result = computeCapabilityLevel();
+    m_capabilityLevelState = result;
+    return result;
+}
+#endif
+
+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
+{
+    if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
+        return;
+    const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
+    for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
+        // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
+        // the next op_profile_control_flow will give us the text range of a single basic block.
+        size_t startIdx = bytecodeOffsets[i];
+        RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow);
+        int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
+        int basicBlockEndOffset;
+        if (i + 1 < offsetsLength) {
+            size_t endIdx = bytecodeOffsets[i + 1];
+            RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow);
+            basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
+        } else {
+            basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
+            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
+        }
+
+        // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
+        // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
+        // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
+        // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
+        // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
+        // program. The condition: 
+        // (basicBlockEndOffset < basicBlockStartOffset) 
+        // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
+        // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
+        // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
+        // internal data structure, so if any of them execute, it will record the same textual basic block in the 
+        // JavaScript program as executing.
+        // At the bytecode level, this situation looks like:
+        // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
+        // ...
+        // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
+        // ...
+        // m: op_profile_control_flow
+        if (basicBlockEndOffset < basicBlockStartOffset) {
+            RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
+            instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
+            continue;
+        }
+
+        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
+
+        // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
+        // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
+        // This is necessary because in the original source text of a JavaScript program, 
+        // function literals form new basic block boundaries, but they aren't represented 
+        // inside the CodeBlock's instruction stream.
+        auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
+            const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
+            int functionStart = executable->typeProfilingStartOffset();
+            int functionEnd = executable->typeProfilingEndOffset();
+            if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
+                basicBlockLocation->insertGap(functionStart, functionEnd);
+        };
+
+        for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
+            insertFunctionGaps(executable);
+        for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
+            insertFunctionGaps(executable);
+
+        instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
+    }
+}
+
+#if ENABLE(JIT)
+void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
+{ 
+    m_pcToCodeOriginMap = WTFMove(map);
+}
+
+std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
+{
+    if (m_pcToCodeOriginMap) {
+        if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
+            return codeOrigin;
+    }
+
+    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+        StructureStubInfo* stub = *iter;
+        if (stub->containsPC(pc))
+            return std::optional<CodeOrigin>(stub->codeOrigin);
+    }
+
+    if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
+        return codeOrigin;
+
+    return std::nullopt;
+}
+#endif // ENABLE(JIT)
+
+std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
+{
+    std::optional<unsigned> bytecodeOffset;
+    JITCode::JITType jitType = this->jitType();
+    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
+#if USE(JSVALUE64)
+        bytecodeOffset = callSiteIndex.bits();
+#else
+        Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
+        bytecodeOffset = instruction - instructions().begin();
+#endif
+    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
+#if ENABLE(DFG_JIT)
+        RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
+        CodeOrigin origin = codeOrigin(callSiteIndex);
+        bytecodeOffset = origin.bytecodeIndex;
+#else
+        RELEASE_ASSERT_NOT_REACHED();
+#endif
+    }
+
+    return bytecodeOffset;
+}
+
+int32_t CodeBlock::thresholdForJIT(int32_t threshold)
+{
+    switch (unlinkedCodeBlock()->didOptimize()) {
+    case MixedTriState:
+        return threshold;
+    case FalseTriState:
+        return threshold * 4;
+    case TrueTriState:
+        return threshold / 2;
+    }
+    ASSERT_NOT_REACHED();
+    return threshold;
+}
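+// Worked example (illustrative numbers only): with a base threshold of 500, a
+// function whose unlinked code never reached the optimizing tiers
+// (FalseTriState) waits for 2000 LLInt executions before baseline compilation,
+// while one that previously optimized (TrueTriState) waits for only 250.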
+
+void CodeBlock::jitAfterWarmUp()
+{
+    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
+}
+
+void CodeBlock::jitSoon()
+{
+    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
+}
+
+void CodeBlock::dumpMathICStats()
+{
+#if ENABLE(MATH_IC_STATS)
+    double numAdds = 0.0;
+    double totalAddSize = 0.0;
+    double numMuls = 0.0;
+    double totalMulSize = 0.0;
+    double numNegs = 0.0;
+    double totalNegSize = 0.0;
+    double numSubs = 0.0;
+    double totalSubSize = 0.0;
+
+    auto countICs = [&] (CodeBlock* codeBlock) {
+        for (JITAddIC* addIC : codeBlock->m_addICs) {
+            numAdds++;
+            totalAddSize += addIC->codeSize();
+        }
+
+        for (JITMulIC* mulIC : codeBlock->m_mulICs) {
+            numMuls++;
+            totalMulSize += mulIC->codeSize();
+        }
+
+        for (JITNegIC* negIC : codeBlock->m_negICs) {
+            numNegs++;
+            totalNegSize += negIC->codeSize();
+        }
+
+        for (JITSubIC* subIC : codeBlock->m_subICs) {
+            numSubs++;
+            totalSubSize += subIC->codeSize();
+        }
+
+        return false;
+    };
+    heap()->forEachCodeBlock(countICs);
+
+    dataLog("Num Adds: ", numAdds, "\n");
+    dataLog("Total Add size in bytes: ", totalAddSize, "\n");
+    dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
+    dataLog("\n");
+    dataLog("Num Muls: ", numMuls, "\n");
+    dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
+    dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
+    dataLog("\n");
+    dataLog("Num Negs: ", numNegs, "\n");
+    dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
+    dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
+    dataLog("\n");
+    dataLog("Num Subs: ", numSubs, "\n");
+    dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
+    dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
+
+    dataLog("-----------------------\n");
+#endif
+}
+
+BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
+{
+    std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
+    {
+        ConcurrentJSLocker locker(m_lock);
+        if (!m_livenessAnalysis)
+            m_livenessAnalysis = WTFMove(analysis);
+        return *m_livenessAnalysis;
+    }
+}
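+// Note: the analysis above is deliberately computed outside the lock; if two
+// threads race here, the loser's freshly built analysis is discarded and both
+// callers return whichever one was installed first.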
+
+
+} // namespace JSC
diff --git a/bytecode/CodeBlock.h b/bytecode/CodeBlock.h
new file mode 100644
index 0000000..1159bb3
--- /dev/null
+++ b/bytecode/CodeBlock.h
@@ -0,0 +1,1101 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ArrayProfile.h"
+#include "ByValInfo.h"
+#include "BytecodeConventions.h"
+#include "CallLinkInfo.h"
+#include "CallReturnOffsetToBytecodeOffset.h"
+#include "CodeBlockHash.h"
+#include "CodeOrigin.h"
+#include "CodeType.h"
+#include "CompactJITCodeMap.h"
+#include "ConcurrentJSLock.h"
+#include "DFGCommon.h"
+#include "DFGExitProfile.h"
+#include "DeferredCompilationCallback.h"
+#include "DirectEvalCodeCache.h"
+#include "EvalExecutable.h"
+#include "ExecutionCounter.h"
+#include "ExpressionRangeInfo.h"
+#include "FunctionExecutable.h"
+#include "HandlerInfo.h"
+#include "Instruction.h"
+#include "JITCode.h"
+#include "JITMathICForwards.h"
+#include "JSCell.h"
+#include "JSGlobalObject.h"
+#include "JumpTable.h"
+#include "LLIntCallLinkInfo.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
+#include "LazyOperandValueProfile.h"
+#include "ModuleProgramExecutable.h"
+#include "ObjectAllocationProfile.h"
+#include "Options.h"
+#include "ProfilerJettisonReason.h"
+#include "ProgramExecutable.h"
+#include "PutPropertySlot.h"
+#include "UnconditionalFinalizer.h"
+#include "ValueProfile.h"
+#include "VirtualRegister.h"
+#include "Watchpoint.h"
+#include <wtf/Bag.h>
+#include <wtf/FastBitVector.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/RefCountedArray.h>
+#include <wtf/RefPtr.h>
+#include <wtf/SegmentedVector.h>
+#include <wtf/Vector.h>
+#include <wtf/text/WTFString.h>
+
+namespace JSC {
+
+class BytecodeLivenessAnalysis;
+class CodeBlockSet;
+class ExecState;
+class JSModuleEnvironment;
+class LLIntOffsetsExtractor;
+class PCToCodeOriginMap;
+class RegisterAtOffsetList;
+class StructureStubInfo;
+
+enum class AccessType : int8_t;
+
+struct ArithProfile;
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
+
+enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
+
+class CodeBlock : public JSCell {
+    typedef JSCell Base;
+    friend class BytecodeLivenessAnalysis;
+    friend class JIT;
+    friend class LLIntOffsetsExtractor;
+
+    class UnconditionalFinalizer : public JSC::UnconditionalFinalizer { 
+        void finalizeUnconditionally() override;
+    };
+
+    class WeakReferenceHarvester : public JSC::WeakReferenceHarvester {
+        void visitWeakReferences(SlotVisitor&) override;
+    };
+
+public:
+    enum CopyParsedBlockTag { CopyParsedBlock };
+
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    DECLARE_INFO;
+
+protected:
+    CodeBlock(VM*, Structure*, CopyParsedBlockTag, CodeBlock& other);
+    CodeBlock(VM*, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
+
+    void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
+    void finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);
+
+    WriteBarrier<JSGlobalObject> m_globalObject;
+
+public:
+    JS_EXPORT_PRIVATE ~CodeBlock();
+
+    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
+
+    CString inferredName() const;
+    CodeBlockHash hash() const;
+    bool hasHash() const;
+    bool isSafeToComputeHash() const;
+    CString hashAsStringIfPossible() const;
+    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
+    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
+    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+
+    int numParameters() const { return m_numParameters; }
+    void setNumParameters(int newValue);
+
+    int numCalleeLocals() const { return m_numCalleeLocals; }
+
+    int* addressOfNumParameters() { return &m_numParameters; }
+    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
+
+    CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
+    void setAlternative(VM&, CodeBlock*);
+
+    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
+    {
+        Functor f(std::forward<Functor>(functor));
+        Vector<CodeBlock*> codeBlocks;
+        codeBlocks.append(this);
+
+        while (!codeBlocks.isEmpty()) {
+            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
+            f(currentCodeBlock);
+
+            if (CodeBlock* alternative = currentCodeBlock->alternative())
+                codeBlocks.append(alternative);
+            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
+                codeBlocks.append(osrEntryBlock);
+        }
+    }
+    
+    CodeSpecializationKind specializationKind() const
+    {
+        return specializationFromIsConstruct(m_isConstructor);
+    }
+
+    CodeBlock* alternativeForJettison();    
+    JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();
+    
+    // FIXME: Get rid of this.
+    // https://bugs.webkit.org/show_bug.cgi?id=123677
+    CodeBlock* baselineVersion();
+
+    static size_t estimatedSize(JSCell*);
+    static void visitChildren(JSCell*, SlotVisitor&);
+    void visitChildren(SlotVisitor&);
+    void visitWeakly(SlotVisitor&);
+    void clearVisitWeaklyHasBeenCalled();
+
+    void dumpSource();
+    void dumpSource(PrintStream&);
+
+    void dumpBytecode();
+    void dumpBytecode(PrintStream&);
+    void dumpBytecode(
+        PrintStream&, unsigned bytecodeOffset,
+        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+    void dumpExceptionHandlers(PrintStream&);
+    void printStructures(PrintStream&, const Instruction*);
+    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
+
+    void dumpMathICStats();
+
+    bool isStrictMode() const { return m_isStrictMode; }
+    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
+
+    inline bool isKnownNotImmediate(int index)
+    {
+        if (index == m_thisRegister.offset() && !m_isStrictMode)
+            return true;
+
+        if (isConstantRegisterIndex(index))
+            return getConstant(index).isCell();
+
+        return false;
+    }
+
+    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
+    {
+        return index >= m_numVars;
+    }
+
+    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+    HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+    void removeExceptionHandlerForCallSite(CallSiteIndex);
+    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
+    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
+
+    std::optional bytecodeOffsetFromCallSiteIndex(CallSiteIndex);
+
+    void getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result);
+    void getStubInfoMap(StubInfoMap& result);
+    
+    void getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result);
+    void getCallLinkInfoMap(CallLinkInfoMap& result);
+
+    void getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result);
+    void getByValInfoMap(ByValInfoMap& result);
+    
+#if ENABLE(JIT)
+    StructureStubInfo* addStubInfo(AccessType);
+    JITAddIC* addJITAddIC(ArithProfile*);
+    JITMulIC* addJITMulIC(ArithProfile*);
+    JITNegIC* addJITNegIC(ArithProfile*);
+    JITSubIC* addJITSubIC(ArithProfile*);
+    Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
+    Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
+    
+    // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
+    // stub info.
+    StructureStubInfo* findStubInfo(CodeOrigin);
+
+    ByValInfo* addByValInfo();
+
+    CallLinkInfo* addCallLinkInfo();
+    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
+    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
+
+    // This is a slow function call used primarily for compiling OSR exits in the case
+    // that there had been inlining. Chances are if you want to use this, you're really
+    // looking for a CallLinkInfoMap to amortize the cost of calling this.
+    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+    
+    // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
+    // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
+    // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
+    // would be able to get rid of this silly function.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
+    void resetJITData();
+#endif // ENABLE(JIT)
+
+    void unlinkIncomingCalls();
+
+#if ENABLE(JIT)
+    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
+    void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
+#endif // ENABLE(JIT)
+
+    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
+
+    void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
+    {
+        m_jitCodeMap = WTFMove(jitCodeMap);
+    }
+    CompactJITCodeMap* jitCodeMap()
+    {
+        return m_jitCodeMap.get();
+    }
+    
+    static void clearLLIntGetByIdCache(Instruction*);
+
+    unsigned bytecodeOffset(Instruction* returnAddress)
+    {
+        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
+        return static_cast<Instruction*>(returnAddress) - instructions().begin();
+    }
+
+    typedef JSC::Instruction Instruction;
+    typedef RefCountedArray<Instruction>& UnpackedInstructions;
+
+    unsigned numberOfInstructions() const { return m_instructions.size(); }
+    RefCountedArray<Instruction>& instructions() { return m_instructions; }
+    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
+
+    size_t predictedMachineCodeSize();
+
+    bool usesOpcode(OpcodeID);
+
+    unsigned instructionCount() const { return m_instructions.size(); }
+
+    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
+    CodeBlock* newReplacement();
+    
+    void setJITCode(PassRefPtr<JITCode> code)
+    {
+        ASSERT(heap()->isDeferred());
+        heap()->reportExtraMemoryAllocated(code->size());
+        ConcurrentJSLocker locker(m_lock);
+        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
+        m_jitCode = code;
+    }
+    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
+    static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
+    JITCode::JITType jitType() const
+    {
+        JITCode* jitCode = m_jitCode.get();
+        WTF::loadLoadFence();
+        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
+        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
+        return result;
+    }
+
+    bool hasBaselineJITProfiling() const
+    {
+        return jitType() == JITCode::BaselineJIT;
+    }
+    
+#if ENABLE(JIT)
+    CodeBlock* replacement();
+
+    DFG::CapabilityLevel computeCapabilityLevel();
+    DFG::CapabilityLevel capabilityLevel();
+    DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
+
+    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
+    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
+#endif
+
+    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
+    
+    ExecutableBase* ownerExecutable() const { return m_ownerExecutable.get(); }
+    ScriptExecutable* ownerScriptExecutable() const { return jsCast<ScriptExecutable*>(m_ownerExecutable.get()); }
+
+    VM* vm() const { return m_vm; }
+
+    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
+    VirtualRegister thisRegister() const { return m_thisRegister; }
+
+    bool usesEval() const { return m_unlinkedCode->usesEval(); }
+
+    void setScopeRegister(VirtualRegister scopeRegister)
+    {
+        ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
+        m_scopeRegister = scopeRegister;
+    }
+
+    VirtualRegister scopeRegister() const
+    {
+        return m_scopeRegister;
+    }
+    
+    CodeType codeType() const
+    {
+        return static_cast<CodeType>(m_codeType);
+    }
+
+    PutPropertySlot::Context putByIdContext() const
+    {
+        if (codeType() == EvalCode)
+            return PutPropertySlot::PutByIdEval;
+        return PutPropertySlot::PutById;
+    }
+
+    SourceProvider* source() const { return m_source.get(); }
+    unsigned sourceOffset() const { return m_sourceOffset; }
+    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }
+
+    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
+    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
+
+    String nameForRegister(VirtualRegister);
+
+    unsigned numberOfArgumentValueProfiles()
+    {
+        ASSERT(m_numParameters >= 0);
+        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
+        return m_argumentValueProfiles.size();
+    }
+    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
+    {
+        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
+        ASSERT(result->m_bytecodeOffset == -1);
+        return result;
+    }
+
+    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
+    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
+    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
+    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
+    {
+        if (ValueProfile* valueProfile = valueProfileForBytecodeOffset(bytecodeOffset))
+            return valueProfile->computeUpdatedPrediction(locker);
+        return SpecNone;
+    }
+
+    unsigned totalNumberOfValueProfiles()
+    {
+        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
+    }
+    ValueProfile* getFromAllValueProfiles(unsigned index)
+    {
+        if (index < numberOfArgumentValueProfiles())
+            return valueProfileForArgument(index);
+        return valueProfile(index - numberOfArgumentValueProfiles());
+    }
+
+    RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
+    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
+    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
+    unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset);
+
+    bool likelyToTakeSlowCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
+        return value >= Options::likelyToTakeSlowCaseMinimumCount();
+    }
+
+    bool couldTakeSlowCase(int bytecodeOffset)
+    {
+        if (!hasBaselineJITProfiling())
+            return false;
+        unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
+        return value >= Options::couldTakeSlowCaseMinimumCount();
+    }
+
+    ArithProfile* arithProfileForBytecodeOffset(int bytecodeOffset);
+    ArithProfile* arithProfileForPC(Instruction*);
+
+    bool couldTakeSpecialFastCase(int bytecodeOffset);
+
+    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
+    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
+    ArrayProfile* addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
+    ArrayProfile* addArrayProfile(unsigned bytecodeOffset);
+    ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
+    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
+    ArrayProfile* getOrAddArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset);
+    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
+
+    // Exception handling support
+
+    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
+    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
+
+    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
+
+#if ENABLE(DFG_JIT)
+    Vector<CodeOrigin>& codeOrigins();
+    
+    // Having code origins implies that there has been some inlining.
+    bool hasCodeOrigins()
+    {
+        return JITCode::isOptimizingJIT(jitType());
+    }
+        
+    bool canGetCodeOrigin(CallSiteIndex index)
+    {
+        if (!hasCodeOrigins())
+            return false;
+        return index.bits() < codeOrigins().size();
+    }
+
+    CodeOrigin codeOrigin(CallSiteIndex index)
+    {
+        return codeOrigins()[index.bits()];
+    }
+
+    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
+    {
+        ASSERT(JITCode::isBaselineCode(jitType()));
+        ConcurrentJSLocker locker(m_lock);
+        return m_exitProfile.add(locker, this, site);
+    }
+
+    bool hasExitSite(const ConcurrentJSLocker& locker, const DFG::FrequentExitSite& site) const
+    {
+        return m_exitProfile.hasExitSite(locker, site);
+    }
+    bool hasExitSite(const DFG::FrequentExitSite& site) const
+    {
+        ConcurrentJSLocker locker(m_lock);
+        return hasExitSite(locker, site);
+    }
+
+    DFG::ExitProfile& exitProfile() { return m_exitProfile; }
+
+    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
+    {
+        return m_lazyOperandValueProfiles;
+    }
+#endif // ENABLE(DFG_JIT)
+
+    // Constant Pool
+#if ENABLE(DFG_JIT)
+    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
+    size_t numberOfDFGIdentifiers() const;
+    const Identifier& identifier(int index) const;
+#else
+    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
+    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
+#endif
+
+    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
+    Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
+    unsigned addConstant(JSValue v)
+    {
+        unsigned result = m_constantRegisters.size();
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantRegisters.last().set(m_globalObject->vm(), this, v);
+        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
+        return result;
+    }
+
+    unsigned addConstantLazily()
+    {
+        unsigned result = m_constantRegisters.size();
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
+        return result;
+    }
+
+    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+    static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; }
+    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
+
+    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
+    int numberOfFunctionDecls() { return m_functionDecls.size(); }
+    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
+    
+    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
+
+    unsigned numberOfConstantBuffers() const
+    {
+        if (!m_rareData)
+            return 0;
+        return m_rareData->m_constantBuffers.size();
+    }
+    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
+    {
+        createRareDataIfNecessary();
+        unsigned size = m_rareData->m_constantBuffers.size();
+        m_rareData->m_constantBuffers.append(buffer);
+        return size;
+    }
+
+    Vector<JSValue>& constantBufferAsVector(unsigned index)
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_constantBuffers[index];
+    }
+    JSValue* constantBuffer(unsigned index)
+    {
+        return constantBufferAsVector(index).data();
+    }
+
+    Heap* heap() const { return &m_vm->heap; }
+    JSGlobalObject* globalObject() { return m_globalObject.get(); }
+
+    JSGlobalObject* globalObjectFor(CodeOrigin);
+
+    BytecodeLivenessAnalysis& livenessAnalysis()
+    {
+        {
+            ConcurrentJSLocker locker(m_lock);
+            if (!!m_livenessAnalysis)
+                return *m_livenessAnalysis;
+        }
+        return livenessAnalysisSlow();
+    }
+    
+    void validate();
+
+    // Jump Tables
+
+    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
+    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
+    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
+    void clearSwitchJumpTables()
+    {
+        if (!m_rareData)
+            return;
+        m_rareData->m_switchJumpTables.clear();
+    }
+
+    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
+    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
+    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
+
+    DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }
+
+    enum ShrinkMode {
+        // Shrink prior to generating machine code that may point directly into vectors.
+        EarlyShrink,
+
+        // Shrink after generating machine code, and after possibly creating new vectors
+        // and appending to others. At this time it is not safe to shrink certain vectors
+        // because we would have generated machine code that references them directly.
+        LateShrink
+    };
+    void shrinkToFit(ShrinkMode);
+
+    // Functions for controlling when JITting kicks in, in a mixed mode
+    // execution world.
+
+    bool checkIfJITThresholdReached()
+    {
+        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
+    }
+
+    void dontJITAnytimeSoon()
+    {
+        m_llintExecuteCounter.deferIndefinitely();
+    }
+
+    int32_t thresholdForJIT(int32_t threshold);
+    void jitAfterWarmUp();
+    void jitSoon();
+
+    const BaselineExecutionCounter& llintExecuteCounter() const
+    {
+        return m_llintExecuteCounter;
+    }
+
+    typedef HashMap<Structure*, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
+    StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }
+
+    // Functions for controlling when tiered compilation kicks in. This
+    // controls both when the optimizing compiler is invoked and when OSR
+    // entry happens. Two triggers exist: the loop trigger and the return
+    // trigger. In either case, when an addition to m_jitExecuteCounter
+    // causes it to become non-negative, the optimizing compiler is
+    // invoked. This includes a fast check to see if this CodeBlock has
+    // already been optimized (i.e. replacement() returns a CodeBlock
+    // that was optimized with a higher tier JIT than this one). In the
+    // case of the loop trigger, if the optimized compilation succeeds
+    // (or has already succeeded in the past) then OSR is attempted to
+    // redirect program flow into the optimized code.
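+    // Rough sketch of the mechanism (illustrative, not the exact
+    // implementation; see ExecutionCounter.h): the counter starts at a
+    // negative value derived from the threshold, each invocation or loop
+    // back edge adds to it, and once it reaches zero the slow path calls
+    // checkIfOptimizationThresholdReached().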
+
+    // These functions are called from within the optimization triggers,
+    // and are used as a single point at which we define the heuristics
+    // for how much warm-up is mandated before the next optimization
+    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
+    // as this is called from the CodeBlock constructor.
+
+    // When we observe a lot of speculation failures, we trigger a
+    // reoptimization. But each time, we increase the optimization trigger
+    // to avoid thrashing.
+    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
+    void countReoptimization();
+#if ENABLE(JIT)
+    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
+    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
+    size_t calleeSaveSpaceAsVirtualRegisters();
+
+    unsigned numberOfDFGCompiles();
+
+    int32_t codeTypeThresholdMultiplier() const;
+
+    int32_t adjustedCounterValue(int32_t desiredThreshold);
+
+    int32_t* addressOfJITExecuteCounter()
+    {
+        return &m_jitExecuteCounter.m_counter;
+    }
+
+    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
+    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
+    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
+
+    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
+
+    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+
+    // Check if the optimization threshold has been reached, and if not,
+    // adjust the heuristics accordingly. Returns true if the threshold has
+    // been reached.
+    bool checkIfOptimizationThresholdReached();
+
+    // Call this to force the next optimization trigger to fire. This is
+    // rarely wise, since optimization triggers are typically more
+    // expensive than executing baseline code.
+    void optimizeNextInvocation();
+
+    // Call this to prevent optimization from happening again. Note that
+    // optimization will still happen after roughly 2^29 invocations,
+    // so this is really meant to delay that as much as possible. This
+    // is called if optimization failed, and we expect it to fail in
+    // the future as well.
+    void dontOptimizeAnytimeSoon();
+
+    // Call this to reinitialize the counter to its starting state,
+    // forcing a warm-up to happen before the next optimization trigger
+    // fires. This is called in the CodeBlock constructor. It also
+    // makes sense to call this if an OSR exit occurred. Note that
+    // OSR exit code is code generated, so the value of the execute
+    // counter that this corresponds to is also available directly.
+    void optimizeAfterWarmUp();
+
+    // Call this to force an optimization trigger to fire only after
+    // a lot of warm-up.
+    void optimizeAfterLongWarmUp();
+
+    // Call this to cause an optimization trigger to fire soon, but
+    // not necessarily the next one. This makes sense if optimization
+    // succeeds. Successful optimization means that all calls are
+    // relinked to the optimized code, so this only affects call
+    // frames that are still executing this CodeBlock. The value here
+    // is tuned to strike a balance between the cost of OSR entry
+    // (which is too high to warrant making every loop back edge to
+    // trigger OSR immediately) and the cost of executing baseline
+    // code (which is high enough that we don't necessarily want to
+    // have a full warm-up). The intuition for calling this instead of
+    // optimizeNextInvocation() is for the case of recursive functions
+    // with loops. Consider that there may be N call frames of some
+    // recursive function, for a reasonably large value of N. The top
+    // one triggers optimization, and then returns, and then all of
+    // the others return. We don't want optimization to be triggered on
+    // each return, as that would be superfluous. It only makes sense
+    // to trigger optimization if one of those functions becomes hot
+    // in the baseline code.
+    void optimizeSoon();
+
+    void forceOptimizationSlowPathConcurrently();
+
+    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
+    
+    uint32_t osrExitCounter() const { return m_osrExitCounter; }
+
+    void countOSRExit() { m_osrExitCounter++; }
+
+    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
+
+    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
+
+    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
+    uint32_t exitCountThresholdForReoptimization();
+    uint32_t exitCountThresholdForReoptimizationFromLoop();
+    bool shouldReoptimizeNow();
+    bool shouldReoptimizeFromLoopNow();
+
+    void setCalleeSaveRegisters(RegisterSet);
+    void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);
+    
+    RegisterAtOffsetList* calleeSaveRegisters() const { return m_calleeSaveRegisters.get(); }
+#else // No JIT
+    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
+    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; };
+    void optimizeAfterWarmUp() { }
+    unsigned numberOfDFGCompiles() { return 0; }
+#endif
+
+    bool shouldOptimizeNow();
+    void updateAllValueProfilePredictions();
+    void updateAllArrayPredictions();
+    void updateAllPredictions();
+
+    unsigned frameRegisterCount();
+    int stackPointerOffset();
+
+    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
+
+    bool hasDebuggerRequests() const { return m_debuggerRequests; }
+    void* debuggerRequestsAddress() { return &m_debuggerRequests; }
+
+    void addBreakpoint(unsigned numBreakpoints);
+    void removeBreakpoint(unsigned numBreakpoints)
+    {
+        ASSERT(m_numBreakpoints >= numBreakpoints);
+        m_numBreakpoints -= numBreakpoints;
+    }
+
+    enum SteppingMode {
+        SteppingModeDisabled,
+        SteppingModeEnabled
+    };
+    void setSteppingMode(SteppingMode);
+
+    void clearDebuggerRequests()
+    {
+        m_steppingMode = SteppingModeDisabled;
+        m_numBreakpoints = 0;
+    }
+
+    bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }
+    
+    // FIXME: Make these remaining members private.
+
+    int m_numCalleeLocals;
+    int m_numVars;
+    
+    // This is intentionally public; it's the responsibility of anyone doing any
+    // of the following to hold the lock:
+    //
+    // - Modifying any inline cache in this code block.
+    //
+    // - Querying any inline cache in this code block, from a thread other than
+    //   the main thread.
+    //
+    // Additionally, it's only legal to modify the inline cache on the main
+    // thread. This means that the main thread can query the inline cache without
+    // locking. This is crucial since executing the inline cache is effectively
+    // "querying" it.
+    //
+    // Another exception to the rules is that the GC can do whatever it wants
+    // without holding any locks, because the GC is guaranteed to wait until any
+    // concurrent compilation threads finish what they're doing.
+    mutable ConcurrentJSLock m_lock;
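+    // For example, a concurrent compiler thread reading inline-cache state would
+    // typically take the lock first (a sketch; the accessor being protected varies):
+    //
+    //     ConcurrentJSLocker locker(codeBlock->m_lock);
+    //     // ... safely read the inline caches owned by codeBlock ...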
+
+    bool m_visitWeaklyHasBeenCalled;
+
+    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
+
+#if ENABLE(JIT)
+    unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
+#endif
+
+    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
+
+    bool m_didFailJITCompilation : 1;
+    bool m_didFailFTLCompilation : 1;
+    bool m_hasBeenCompiledWithFTL : 1;
+    bool m_isConstructor : 1;
+    bool m_isStrictMode : 1;
+    unsigned m_codeType : 2; // CodeType
+
+    // Internal methods for use by validation code. They would be private if it weren't
+    // for the fact that we use them from anonymous namespaces.
+    void beginValidationDidFail();
+    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
+
+    struct RareData {
+        WTF_MAKE_FAST_ALLOCATED;
+    public:
+        Vector<HandlerInfo> m_exceptionHandlers;
+
+        // Buffers used for large array literals
+        Vector<Vector<JSValue>> m_constantBuffers;
+
+        // Jump Tables
+        Vector<SimpleJumpTable> m_switchJumpTables;
+        Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+        DirectEvalCodeCache m_directEvalCodeCache;
+    };
+
+    void clearExceptionHandlers()
+    {
+        if (m_rareData)
+            m_rareData->m_exceptionHandlers.clear();
+    }
+
+    void appendExceptionHandler(const HandlerInfo& handler)
+    {
+        createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
+        m_rareData->m_exceptionHandlers.append(handler);
+    }
+
+    CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
+
+#if ENABLE(JIT)
+    void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
+    std::optional<CodeOrigin> findPC(void* pc);
+#endif
+
+protected:
+    void finalizeLLIntInlineCaches();
+    void finalizeBaselineJITInlineCaches();
+
+#if ENABLE(DFG_JIT)
+    void tallyFrequentExitSites();
+#else
+    void tallyFrequentExitSites() { }
+#endif
+
+private:
+    friend class CodeBlockSet;
+
+    BytecodeLivenessAnalysis& livenessAnalysisSlow();
+    
+    CodeBlock* specialOSREntryBlockOrNull();
+    
+    void noticeIncomingCall(ExecState* callerFrame);
+    
+    double optimizationThresholdScalingFactor();
+
+    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
+
+    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation);
+
+    void replaceConstant(int index, JSValue value)
+    {
+        ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size());
+        m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), this, value);
+    }
+
+    void dumpBytecode(
+        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
+        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
+
+    CString registerName(int r) const;
+    CString constantName(int index) const;
+    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
+    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
+    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
+    enum CacheDumpMode { DumpCaches, DontDumpCaches };
+    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
+    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printPutByIdCacheStatus(PrintStream&, int location, const StubInfoMap&);
+    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
+    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
+
+    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
+    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
+    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
+    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
+    void dumpArithProfile(PrintStream&, ArithProfile*, bool& hasPrintedProfiling);
+
+    bool shouldVisitStrongly(const ConcurrentJSLocker&);
+    bool shouldJettisonDueToWeakReference();
+    bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);
+    
+    void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
+    void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);
+        
+    void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
+    void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
+    void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);
+
+    std::chrono::milliseconds timeSinceCreation()
+    {
+        return std::chrono::duration_cast<std::chrono::milliseconds>(
+            std::chrono::steady_clock::now() - m_creationTime);
+    }
+
+    void createRareDataIfNecessary()
+    {
+        if (!m_rareData)
+            m_rareData = std::make_unique<RareData>();
+    }
+
+    void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&);
+
+    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
+    int m_numParameters;
+    union {
+        unsigned m_debuggerRequests;
+        struct {
+            unsigned m_hasDebuggerStatement : 1;
+            unsigned m_steppingMode : 1;
+            unsigned m_numBreakpoints : 30;
+        };
+    };
+    WriteBarrier<ExecutableBase> m_ownerExecutable;
+    VM* m_vm;
+
+    RefCountedArray<Instruction> m_instructions;
+    VirtualRegister m_thisRegister;
+    VirtualRegister m_scopeRegister;
+    mutable CodeBlockHash m_hash;
+
+    RefPtr<SourceProvider> m_source;
+    unsigned m_sourceOffset;
+    unsigned m_firstLineColumnOffset;
+
+    RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos;
+    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
+    StructureWatchpointMap m_llintGetByIdWatchpointMap;
+    RefPtr<JITCode> m_jitCode;
+#if ENABLE(JIT)
+    std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
+    Bag<StructureStubInfo> m_stubInfos;
+    Bag<JITAddIC> m_addICs;
+    Bag<JITMulIC> m_mulICs;
+    Bag<JITNegIC> m_negICs;
+    Bag<JITSubIC> m_subICs;
+    Bag<ByValInfo> m_byValInfos;
+    Bag<CallLinkInfo> m_callLinkInfos;
+    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
+    SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
+    std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
+#endif
+    std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
+#if ENABLE(DFG_JIT)
+    // This is relevant to non-DFG code blocks that serve as the profiled code block
+    // for DFG code blocks.
+    DFG::ExitProfile m_exitProfile;
+    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
+#endif
+    RefCountedArray<ValueProfile> m_argumentValueProfiles;
+    RefCountedArray<ValueProfile> m_valueProfiles;
+    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+    RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles;
+    ArrayProfileVector m_arrayProfiles;
+    RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles;
+
+    // Constant Pool
+    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
+    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
+    // it, so we're stuck with it for now.
+    Vector<WriteBarrier<Unknown>> m_constantRegisters;
+    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
+    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;
+
+    WriteBarrier<CodeBlock> m_alternative;
+    
+    BaselineExecutionCounter m_llintExecuteCounter;
+
+    BaselineExecutionCounter m_jitExecuteCounter;
+    uint32_t m_osrExitCounter;
+    uint16_t m_optimizationDelayCounter;
+    uint16_t m_reoptimizationRetryCounter;
+
+    std::chrono::steady_clock::time_point m_creationTime;
+
+    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
+
+    std::unique_ptr<RareData> m_rareData;
+
+    UnconditionalFinalizer m_unconditionalFinalizer;
+    WeakReferenceHarvester m_weakReferenceHarvester;
+};
+
+inline Register& ExecState::r(int index)
+{
+    CodeBlock* codeBlock = this->codeBlock();
+    if (codeBlock->isConstantRegisterIndex(index))
+        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
+    return this[index];
+}
+
+inline Register& ExecState::r(VirtualRegister reg)
+{
+    return r(reg.offset());
+}
+
+inline Register& ExecState::uncheckedR(int index)
+{
+    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
+    return this[index];
+}
+
+inline Register& ExecState::uncheckedR(VirtualRegister reg)
+{
+    return uncheckedR(reg.offset());
+}
+
+inline void CodeBlock::clearVisitWeaklyHasBeenCalled()
+{
+    m_visitWeaklyHasBeenCalled = false;
+}
+
+template <typename ExecutableType>
+JSObject* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
+{
+    if (hasJITCodeFor(kind)) {
+        if (std::is_same<ExecutableType, EvalExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock());
+        else if (std::is_same<ExecutableType, ProgramExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock());
+        else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock());
+        else if (std::is_same<ExecutableType, FunctionExecutable>::value)
+            resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind));
+        else
+            RELEASE_ASSERT_NOT_REACHED();
+        return nullptr;
+    }
+    return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
+}
+
+#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
+    (codeBlock->vm()->logEvent(codeBlock, summary, [&] () { return toCString details; }))
+
+} // namespace JSC
diff --git a/bytecode/CodeBlockHash.cpp b/bytecode/CodeBlockHash.cpp
new file mode 100644
index 0000000..87c092f
--- /dev/null
+++ b/bytecode/CodeBlockHash.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CodeBlockHash.h"
+
+#include "SourceCode.h"
+#include <wtf/SHA1.h>
+#include <wtf/SixCharacterHash.h>
+
+namespace JSC {
+
+CodeBlockHash::CodeBlockHash(const char* string)
+    : m_hash(sixCharacterHashStringToInteger(string))
+{
+}
+
+CodeBlockHash::CodeBlockHash(const SourceCode& sourceCode, CodeSpecializationKind kind)
+    : m_hash(0)
+{
+    SHA1 sha1;
+    sha1.addBytes(sourceCode.toUTF8());
+    SHA1::Digest digest;
+    sha1.computeHash(digest);
+    m_hash += digest[0] | (digest[1] << 8) | (digest[2] << 16) | (digest[3] << 24);
+    m_hash ^= static_cast<unsigned>(kind);
+    
+    // Ensure that 0 corresponds to the hash not having been computed.
+    if (!m_hash)
+        m_hash = 1;
+}
+
+void CodeBlockHash::dump(PrintStream& out) const
+{
+    std::array<char, 7> buffer = integerToSixCharacterHashString(m_hash);
+    
+#if !ASSERT_DISABLED
+    CodeBlockHash recompute(buffer.data());
+    ASSERT(recompute == *this);
+#endif // !ASSERT_DISABLED
+    
+    out.print(buffer.data());
+}
+
+} // namespace JSC
+
diff --git a/bytecode/CodeBlockHash.h b/bytecode/CodeBlockHash.h
new file mode 100644
index 0000000..b828fe8
--- /dev/null
+++ b/bytecode/CodeBlockHash.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeSpecializationKind.h"
+#include <wtf/PrintStream.h>
+
+// CodeBlock hashes are useful for informally identifying code blocks. They correspond
+// to the low 32 bits of a SHA1 hash of the source code with two low bits flipped
+// according to the role that the code block serves (call, construct). Additionally, the
+// hashes are typically presented as a string in which the hash is transformed
+// into a six-character alphanumeric representation. This can be retrieved by using
+// toString(const CodeBlockHash&). Finally, we support CodeBlockHashes for native
+// functions, in which case the hash is replaced by the function address.
+
+namespace JSC {
+
+class SourceCode;
+
+class CodeBlockHash {
+public:
+    CodeBlockHash()
+        : m_hash(0)
+    {
+    }
+    
+    explicit CodeBlockHash(unsigned hash)
+        : m_hash(hash)
+    {
+    }
+    
+    CodeBlockHash(const SourceCode&, CodeSpecializationKind);
+    
+    explicit CodeBlockHash(const char*);
+
+    bool isSet() const { return !!m_hash; }
+    bool operator!() const { return !isSet(); }
+    
+    unsigned hash() const { return m_hash; }
+    
+    void dump(PrintStream&) const;
+    
+    // Comparison methods useful for bisection.
+    bool operator==(const CodeBlockHash& other) const { return hash() == other.hash(); }
+    bool operator!=(const CodeBlockHash& other) const { return hash() != other.hash(); }
+    bool operator<(const CodeBlockHash& other) const { return hash() < other.hash(); }
+    bool operator>(const CodeBlockHash& other) const { return hash() > other.hash(); }
+    bool operator<=(const CodeBlockHash& other) const { return hash() <= other.hash(); }
+    bool operator>=(const CodeBlockHash& other) const { return hash() >= other.hash(); }
+    
+private:
+    unsigned m_hash;
+};
+
+} // namespace JSC
diff --git a/bytecode/CodeBlockJettisoningWatchpoint.cpp b/bytecode/CodeBlockJettisoningWatchpoint.cpp
new file mode 100644
index 0000000..50cf737
--- /dev/null
+++ b/bytecode/CodeBlockJettisoningWatchpoint.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CodeBlockJettisoningWatchpoint.h"
+
+#include "CodeBlock.h"
+#include "DFGCommon.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void CodeBlockJettisoningWatchpoint::fireInternal(const FireDetail& detail)
+{
+    if (DFG::shouldDumpDisassembly())
+        dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n");
+
+    m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/CodeBlockJettisoningWatchpoint.h b/bytecode/CodeBlockJettisoningWatchpoint.h
new file mode 100644
index 0000000..635cd78
--- /dev/null
+++ b/bytecode/CodeBlockJettisoningWatchpoint.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class CodeBlock;
+
+class CodeBlockJettisoningWatchpoint : public Watchpoint {
+public:
+    CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock)
+        : m_codeBlock(codeBlock)
+    {
+    }
+    
+protected:
+    void fireInternal(const FireDetail&) override;
+
+private:
+    CodeBlock* m_codeBlock;
+};
+
+} // namespace JSC
diff --git a/bytecode/CodeBlockWithJITType.h b/bytecode/CodeBlockWithJITType.h
new file mode 100644
index 0000000..37f83c4
--- /dev/null
+++ b/bytecode/CodeBlockWithJITType.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+// We sometimes want to print the CodeBlock's ID before setting its JITCode. At that
+// point the CodeBlock will claim a bogus JITType. This helper class lets us do that.
+
+class CodeBlockWithJITType {
+public:
+    CodeBlockWithJITType(CodeBlock* codeBlock, JITCode::JITType jitType)
+        : m_codeBlock(codeBlock)
+        , m_jitType(jitType)
+    {
+    }
+    
+    void dump(PrintStream& out) const
+    {
+        m_codeBlock->dumpAssumingJITType(out, m_jitType);
+    }
+private:
+    CodeBlock* m_codeBlock;
+    JITCode::JITType m_jitType;
+};
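+
+// Typical usage is via the printing machinery (a sketch, not a requirement of the
+// class): stream a temporary into a dump before the JITCode has been installed, e.g.
+//
+//     dataLog("Compiling ", CodeBlockWithJITType(codeBlock, JITCode::DFGJIT), "\n");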
+
+} // namespace JSC
diff --git a/bytecode/CodeOrigin.cpp b/bytecode/CodeOrigin.cpp
new file mode 100644
index 0000000..a52df92
--- /dev/null
+++ b/bytecode/CodeOrigin.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CodeOrigin.h"
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame)
+{
+    unsigned result = 1;
+    for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
+        result++;
+    return result;
+}
+
+unsigned CodeOrigin::inlineDepth() const
+{
+    return inlineDepthForCallFrame(inlineCallFrame);
+}
+
+bool CodeOrigin::isApproximatelyEqualTo(const CodeOrigin& other) const
+{
+    CodeOrigin a = *this;
+    CodeOrigin b = other;
+
+    if (!a.isSet())
+        return !b.isSet();
+    if (!b.isSet())
+        return false;
+    
+    if (a.isHashTableDeletedValue())
+        return b.isHashTableDeletedValue();
+    if (b.isHashTableDeletedValue())
+        return false;
+    
+    for (;;) {
+        ASSERT(a.isSet());
+        ASSERT(b.isSet());
+        
+        if (a.bytecodeIndex != b.bytecodeIndex)
+            return false;
+        
+        if ((!!a.inlineCallFrame) != (!!b.inlineCallFrame))
+            return false;
+        
+        if (!a.inlineCallFrame)
+            return true;
+        
+        if (a.inlineCallFrame->baselineCodeBlock.get() != b.inlineCallFrame->baselineCodeBlock.get())
+            return false;
+        
+        a = a.inlineCallFrame->directCaller;
+        b = b.inlineCallFrame->directCaller;
+    }
+}
+
+unsigned CodeOrigin::approximateHash() const
+{
+    if (!isSet())
+        return 0;
+    if (isHashTableDeletedValue())
+        return 1;
+    
+    unsigned result = 2;
+    CodeOrigin codeOrigin = *this;
+    for (;;) {
+        result += codeOrigin.bytecodeIndex;
+        
+        if (!codeOrigin.inlineCallFrame)
+            return result;
+        
+        result += WTF::PtrHash<CodeBlock*>::hash(codeOrigin.inlineCallFrame->baselineCodeBlock.get());
+        
+        codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+    }
+}
+
+Vector<CodeOrigin> CodeOrigin::inlineStack() const
+{
+    Vector<CodeOrigin> result(inlineDepth());
+    result.last() = *this;
+    unsigned index = result.size() - 2;
+    for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
+        result[index--] = current->directCaller;
+    RELEASE_ASSERT(!result[0].inlineCallFrame);
+    return result;
+}
+
+CodeBlock* CodeOrigin::codeOriginOwner() const
+{
+    if (!inlineCallFrame)
+        return 0;
+    return inlineCallFrame->baselineCodeBlock.get();
+}
+
+int CodeOrigin::stackOffset() const
+{
+    if (!inlineCallFrame)
+        return 0;
+    
+    return inlineCallFrame->stackOffset;
+}
+
+void CodeOrigin::dump(PrintStream& out) const
+{
+    if (!isSet()) {
+        out.print("");
+        return;
+    }
+    
+    Vector<CodeOrigin> stack = inlineStack();
+    for (unsigned i = 0; i < stack.size(); ++i) {
+        if (i)
+            out.print(" --> ");
+        
+        if (InlineCallFrame* frame = stack[i].inlineCallFrame) {
+            out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->baselineCodeBlock.get()), "> ");
+            if (frame->isClosureCall)
+                out.print("(closure) ");
+        }
+        
+        out.print("bc#", stack[i].bytecodeIndex);
+    }
+}
+
+void CodeOrigin::dumpInContext(PrintStream& out, DumpContext*) const
+{
+    dump(out);
+}
+
+} // namespace JSC
diff --git a/bytecode/CodeOrigin.h b/bytecode/CodeOrigin.h
new file mode 100644
index 0000000..38712f9
--- /dev/null
+++ b/bytecode/CodeOrigin.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallMode.h"
+#include "CodeBlockHash.h"
+#include "CodeSpecializationKind.h"
+#include "WriteBarrier.h"
+#include <limits.h>
+#include <wtf/HashMap.h>
+#include <wtf/PrintStream.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+struct DumpContext;
+struct InlineCallFrame;
+
+struct CodeOrigin {
+    static const unsigned invalidBytecodeIndex = UINT_MAX;
+    
+    // Bytecode offset that you'd use to re-execute this instruction, and the
+    // bytecode index of the bytecode instruction that produces some result that
+    // you're interested in (used for mapping Nodes whose values you're using
+    // to bytecode instructions that have the appropriate value profile).
+    unsigned bytecodeIndex;
+    
+    InlineCallFrame* inlineCallFrame;
+    
+    CodeOrigin()
+        : bytecodeIndex(invalidBytecodeIndex)
+        , inlineCallFrame(0)
+    {
+    }
+    
+    CodeOrigin(WTF::HashTableDeletedValueType)
+        : bytecodeIndex(invalidBytecodeIndex)
+        , inlineCallFrame(deletedMarker())
+    {
+    }
+    
+    explicit CodeOrigin(unsigned bytecodeIndex, InlineCallFrame* inlineCallFrame = 0)
+        : bytecodeIndex(bytecodeIndex)
+        , inlineCallFrame(inlineCallFrame)
+    {
+        ASSERT(bytecodeIndex < invalidBytecodeIndex);
+    }
+    
+    bool isSet() const { return bytecodeIndex != invalidBytecodeIndex; }
+    explicit operator bool() const { return isSet(); }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return bytecodeIndex == invalidBytecodeIndex && !!inlineCallFrame;
+    }
+    
+    // The inline depth is the depth of the inline stack, so 1 = not inlined,
+    // 2 = inlined one deep, etc.
+    unsigned inlineDepth() const;
+    
+    // If the code origin corresponds to inlined code, gives you the heap object that
+    // would have owned the code if it had not been inlined. Otherwise returns 0.
+    CodeBlock* codeOriginOwner() const;
+    
+    int stackOffset() const;
+    
+    static unsigned inlineDepthForCallFrame(InlineCallFrame*);
+    
+    unsigned hash() const;
+    bool operator==(const CodeOrigin& other) const;
+    bool operator!=(const CodeOrigin& other) const { return !(*this == other); }
+    
+    // This checks if the two code origins correspond to the same stack trace snippets,
+    // but ignores whether the InlineCallFrames are identical.
+    bool isApproximatelyEqualTo(const CodeOrigin& other) const;
+    
+    unsigned approximateHash() const;
+
+    template <typename Function>
+    void walkUpInlineStack(const Function&);
+    
+    // Get the inline stack. This is slow, and is intended for debugging only.
+    Vector<CodeOrigin> inlineStack() const;
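+    // For instance, a debugging dump might walk it outermost-first (sketch):
+    //
+    //     for (CodeOrigin origin : someCodeOrigin.inlineStack())
+    //         dataLog(origin, "\n"); // the last entry printed is the origin itself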
+    
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+    static InlineCallFrame* deletedMarker()
+    {
+        return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1));
+    }
+};
+
+inline unsigned CodeOrigin::hash() const
+{
+    return WTF::IntHash<unsigned>::hash(bytecodeIndex) +
+        WTF::PtrHash<InlineCallFrame*>::hash(inlineCallFrame);
+}
+
+inline bool CodeOrigin::operator==(const CodeOrigin& other) const
+{
+    return bytecodeIndex == other.bytecodeIndex
+        && inlineCallFrame == other.inlineCallFrame;
+}
+
+struct CodeOriginHash {
+    static unsigned hash(const CodeOrigin& key) { return key.hash(); }
+    static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+struct CodeOriginApproximateHash {
+    static unsigned hash(const CodeOrigin& key) { return key.approximateHash(); }
+    static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a.isApproximatelyEqualTo(b); }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::CodeOrigin> {
+    typedef JSC::CodeOriginHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::CodeOrigin> : SimpleClassHashTraits<JSC::CodeOrigin> {
+    static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
diff --git a/bytecode/CodeType.cpp b/bytecode/CodeType.cpp
new file mode 100644
index 0000000..0c7043d
--- /dev/null
+++ b/bytecode/CodeType.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "CodeType.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::CodeType codeType)
+{
+    switch (codeType) {
+    case JSC::GlobalCode:
+        out.print("Global");
+        return;
+    case JSC::EvalCode:
+        out.print("Eval");
+        return;
+    case JSC::FunctionCode:
+        out.print("Function");
+        return;
+    case JSC::ModuleCode:
+        out.print("Module");
+        return;
+    default:
+        CRASH();
+        return;
+    }
+}
+
+} // namespace WTF
+
diff --git a/bytecode/CodeType.h b/bytecode/CodeType.h
new file mode 100644
index 0000000..3c38ca2
--- /dev/null
+++ b/bytecode/CodeType.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum CodeType { GlobalCode, EvalCode, FunctionCode, ModuleCode };
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::CodeType);
+
+} // namespace WTF
diff --git a/bytecode/ComplexGetStatus.cpp b/bytecode/ComplexGetStatus.cpp
new file mode 100644
index 0000000..0622553
--- /dev/null
+++ b/bytecode/ComplexGetStatus.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ComplexGetStatus.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+ComplexGetStatus ComplexGetStatus::computeFor(
+    Structure* headStructure, const ObjectPropertyConditionSet& conditionSet, UniquedStringImpl* uid)
+{
+    // FIXME: We should assert that we never see a structure for which
+    // getOwnPropertySlotIsImpure() is true but for which
+    // newImpurePropertyFiresWatchpoints() is false. We're not at a point where we can do
+    // that, yet.
+    // https://bugs.webkit.org/show_bug.cgi?id=131810
+    
+    ASSERT(conditionSet.isValid());
+    
+    if (headStructure->takesSlowPathInDFGForImpureProperty())
+        return takesSlowPath();
+    
+    ComplexGetStatus result;
+    result.m_kind = Inlineable;
+    
+    if (!conditionSet.isEmpty()) {
+        result.m_conditionSet = conditionSet;
+        
+        if (!result.m_conditionSet.structuresEnsureValidity())
+            return skip();
+
+        unsigned numberOfSlotBases =
+            result.m_conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence);
+        RELEASE_ASSERT(numberOfSlotBases <= 1);
+        if (!numberOfSlotBases) {
+            ASSERT(result.m_offset == invalidOffset);
+            return result;
+        }
+        ObjectPropertyCondition base = result.m_conditionSet.slotBaseCondition();
+        ASSERT(base.kind() == PropertyCondition::Presence);
+
+        result.m_offset = base.offset();
+    } else
+        result.m_offset = headStructure->getConcurrently(uid);
+    
+    if (!isValidOffset(result.m_offset))
+        return takesSlowPath();
+    
+    return result;
+}
+
+} // namespace JSC
+
+
diff --git a/bytecode/ComplexGetStatus.h b/bytecode/ComplexGetStatus.h
new file mode 100644
index 0000000..d94b312
--- /dev/null
+++ b/bytecode/ComplexGetStatus.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class CodeBlock;
+class StructureChain;
+
+// This class is useful for figuring out how to inline a cached get-like access. We
+// say "get-like" because this is appropriate for loading the GetterSetter object in
+// a put_by_id that hits a setter. Notably, this doesn't figure out how to call
+// accessors, or even whether they should be called. What it gives us is a way of
+// determining how to load the value from the requested property (identified by a
+// StringImpl* uid) from an object of the given structure in the given CodeBlock,
+// assuming that such an access had already been cached by Repatch (and so Repatch had
+// already done a bunch of safety checks). This doesn't reexecute any checks that
+// Repatch would have executed, and for prototype chain accesses, it doesn't ask the
+// objects in the prototype chain whether their getOwnPropertySlot would attempt to
+// intercept the access - so this really is only appropriate if you already know that
+// one of the JITOperations had OK'd this for caching and that Repatch concurred.
+//
+// The typical use pattern is something like:
+//
+//     ComplexGetStatus status = ComplexGetStatus::computeFor(...);
+//     switch (status.kind()) {
+//     case ComplexGetStatus::ShouldSkip:
+//         // Handle the case where this kind of access is possibly safe but wouldn't
+//         // pass the required safety checks. For example, if an IC gives us a list of
+//         // accesses and one of them is ShouldSkip, then we should pretend as if it
+//         // wasn't even there.
+//         break;
+//     case ComplexGetStatus::TakesSlowPath:
+//         // This kind of access is not safe to inline. Bail out of any attempts to
+//         // inline.
+//         break;
+//     case ComplexGetStatus::Inlineable:
+//         // The good stuff goes here. If it's Inlineable then the other properties of
+//         // the 'status' object will tell you everything you need to know about how
+//         // to execute the get-like operation.
+//         break;
+//     }
+
+class ComplexGetStatus {
+public:
+    enum Kind {
+        ShouldSkip,
+        TakesSlowPath,
+        Inlineable
+    };
+    
+    ComplexGetStatus()
+        : m_kind(ShouldSkip)
+        , m_offset(invalidOffset)
+    {
+    }
+    
+    static ComplexGetStatus skip()
+    {
+        return ComplexGetStatus();
+    }
+    
+    static ComplexGetStatus takesSlowPath()
+    {
+        ComplexGetStatus result;
+        result.m_kind = TakesSlowPath;
+        return result;
+    }
+    
+    static ComplexGetStatus computeFor(
+        Structure* headStructure, const ObjectPropertyConditionSet&, UniquedStringImpl* uid);
+    
+    Kind kind() const { return m_kind; }
+    PropertyOffset offset() const { return m_offset; }
+    const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+    
+private:
+    Kind m_kind;
+    PropertyOffset m_offset;
+    ObjectPropertyConditionSet m_conditionSet;
+};
+
+} // namespace JSC
diff --git a/bytecode/DFGExitProfile.cpp b/bytecode/DFGExitProfile.cpp
new file mode 100644
index 0000000..64fe9a3
--- /dev/null
+++ b/bytecode/DFGExitProfile.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DFGExitProfile.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "VMInlines.h"
+
+namespace JSC { namespace DFG {
+
+void FrequentExitSite::dump(PrintStream& out) const
+{
+    out.print("bc#", m_bytecodeOffset, ": ", m_kind, "/", m_jitType);
+}
+
+ExitProfile::ExitProfile() { }
+ExitProfile::~ExitProfile() { }
+
+bool ExitProfile::add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite& site)
+{
+    ASSERT(site.jitType() != ExitFromAnything);
+
+    CODEBLOCK_LOG_EVENT(owner, "frequentExit", (site));
+    
+    if (Options::verboseExitProfile())
+        dataLog(pointerDump(owner), ": Adding exit site: ", site, "\n");
+    
+    // If we've never seen any frequent exits then create the list and put this site
+    // into it.
+    if (!m_frequentExitSites) {
+        m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>();
+        m_frequentExitSites->append(site);
+        return true;
+    }
+    
+    // Don't add it if it's already there. This is O(n), but that's OK, because we
+    // know that the total number of places where code exits tends to not be large,
+    // and this code is only used when recompilation is triggered.
+    for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
+        if (m_frequentExitSites->at(i) == site)
+            return false;
+    }
+    
+    m_frequentExitSites->append(site);
+    return true;
+}
+
+Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
+{
+    Vector<FrequentExitSite> result;
+    
+    if (!m_frequentExitSites)
+        return result;
+    
+    for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
+        if (m_frequentExitSites->at(i).bytecodeOffset() == bytecodeIndex)
+            result.append(m_frequentExitSites->at(i));
+    }
+    
+    return result;
+}
+
+bool ExitProfile::hasExitSite(const ConcurrentJSLocker&, const FrequentExitSite& site) const
+{
+    if (!m_frequentExitSites)
+        return false;
+    
+    for (unsigned i = m_frequentExitSites->size(); i--;) {
+        if (site.subsumes(m_frequentExitSites->at(i)))
+            return true;
+    }
+    return false;
+}
+
+QueryableExitProfile::QueryableExitProfile() { }
+QueryableExitProfile::~QueryableExitProfile() { }
+
+void QueryableExitProfile::initialize(const ConcurrentJSLocker&, const ExitProfile& profile)
+{
+    if (!profile.m_frequentExitSites)
+        return;
+    
+    for (unsigned i = 0; i < profile.m_frequentExitSites->size(); ++i)
+        m_frequentExitSites.add(profile.m_frequentExitSites->at(i));
+}
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/bytecode/DFGExitProfile.h b/bytecode/DFGExitProfile.h
new file mode 100644
index 0000000..337e3ec
--- /dev/null
+++ b/bytecode/DFGExitProfile.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2011-2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(DFG_JIT)
+
+#include "ConcurrentJSLock.h"
+#include "ExitKind.h"
+#include "ExitingJITType.h"
+#include <wtf/HashSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+class FrequentExitSite {
+public:
+    FrequentExitSite()
+        : m_bytecodeOffset(0) // 0 = empty value
+        , m_kind(ExitKindUnset)
+        , m_jitType(ExitFromAnything)
+    {
+    }
+    
+    FrequentExitSite(WTF::HashTableDeletedValueType)
+        : m_bytecodeOffset(1) // 1 = deleted value
+        , m_kind(ExitKindUnset)
+        , m_jitType(ExitFromAnything)
+    {
+    }
+    
+    explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_kind(kind)
+        , m_jitType(jitType)
+    {
+        if (m_kind == ArgumentsEscaped) {
+            // Count this one globally. It doesn't matter where in the code block the arguments escaped;
+            // the fact that they did is not associated with any particular instruction.
+            m_bytecodeOffset = 0;
+        }
+    }
+    
+    // Use this constructor if you wish for the exit site to be counted globally within its
+    // code block.
+    explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything)
+        : m_bytecodeOffset(0)
+        , m_kind(kind)
+        , m_jitType(jitType)
+    {
+    }
+    
+    bool operator!() const
+    {
+        return m_kind == ExitKindUnset;
+    }
+    
+    bool operator==(const FrequentExitSite& other) const
+    {
+        return m_bytecodeOffset == other.m_bytecodeOffset
+            && m_kind == other.m_kind
+            && m_jitType == other.m_jitType;
+    }
+    
+    bool subsumes(const FrequentExitSite& other) const
+    {
+        if (m_bytecodeOffset != other.m_bytecodeOffset)
+            return false;
+        if (m_kind != other.m_kind)
+            return false;
+        if (m_jitType == ExitFromAnything)
+            return true;
+        return m_jitType == other.m_jitType;
+    }
+    
+    unsigned hash() const
+    {
+        return WTF::intHash(m_bytecodeOffset) + m_kind + m_jitType * 7;
+    }
+    
+    unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+    ExitKind kind() const { return m_kind; }
+    ExitingJITType jitType() const { return m_jitType; }
+    
+    FrequentExitSite withJITType(ExitingJITType jitType) const
+    {
+        FrequentExitSite result = *this;
+        result.m_jitType = jitType;
+        return result;
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return m_kind == ExitKindUnset && m_bytecodeOffset;
+    }
+    
+    void dump(PrintStream& out) const;
+
+private:
+    unsigned m_bytecodeOffset;
+    ExitKind m_kind;
+    ExitingJITType m_jitType;
+};
+
+struct FrequentExitSiteHash {
+    static unsigned hash(const FrequentExitSite& key) { return key.hash(); }
+    static bool equal(const FrequentExitSite& a, const FrequentExitSite& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::FrequentExitSite> {
+    typedef JSC::DFG::FrequentExitSiteHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::FrequentExitSite> : SimpleClassHashTraits<JSC::DFG::FrequentExitSite> { };
+
+} // namespace WTF
+
+namespace JSC { namespace DFG {
+
+class QueryableExitProfile;
+
+class ExitProfile {
+public:
+    ExitProfile();
+    ~ExitProfile();
+    
+    // Add a new frequent exit site. Return true if this is a new one, or false
+    // if we already knew about it. This is an O(n) operation, because it errs
+    // on the side of keeping the data structure compact. Also, this will only
+    // be called a fixed number of times per recompilation. Recompilation is
+    // rare to begin with, and implies doing O(n) operations on the CodeBlock
+    // anyway.
+    bool add(const ConcurrentJSLocker&, CodeBlock* owner, const FrequentExitSite&);
+    
+    // Get the frequent exit sites for a bytecode index. This is O(n), and is
+    // meant to only be used from debugging/profiling code.
+    Vector<FrequentExitSite> exitSitesFor(unsigned bytecodeIndex);
+    
+    // This is O(n) and should be called on less-frequently executed code paths
+    // in the compiler. It should be strictly cheaper than building a
+    // QueryableExitProfile, if you really expect this to be called infrequently
+    // and you believe that there are few exit sites.
+    bool hasExitSite(const ConcurrentJSLocker&, const FrequentExitSite&) const;
+    bool hasExitSite(const ConcurrentJSLocker& locker, ExitKind kind) const
+    {
+        return hasExitSite(locker, FrequentExitSite(kind));
+    }
+    bool hasExitSite(const ConcurrentJSLocker& locker, unsigned bytecodeIndex, ExitKind kind) const
+    {
+        return hasExitSite(locker, FrequentExitSite(bytecodeIndex, kind));
+    }
+    
+private:
+    friend class QueryableExitProfile;
+    
+    std::unique_ptr<Vector<FrequentExitSite>> m_frequentExitSites;
+};
+
+class QueryableExitProfile {
+public:
+    QueryableExitProfile();
+    ~QueryableExitProfile();
+    
+    void initialize(const ConcurrentJSLocker&, const ExitProfile&);
+
+    bool hasExitSite(const FrequentExitSite& site) const
+    {
+        if (site.jitType() == ExitFromAnything) {
+            return hasExitSite(site.withJITType(ExitFromDFG))
+                || hasExitSite(site.withJITType(ExitFromFTL));
+        }
+        return m_frequentExitSites.find(site) != m_frequentExitSites.end();
+    }
+    
+    bool hasExitSite(ExitKind kind) const
+    {
+        return hasExitSite(FrequentExitSite(kind));
+    }
+    
+    bool hasExitSite(unsigned bytecodeIndex, ExitKind kind) const
+    {
+        return hasExitSite(FrequentExitSite(bytecodeIndex, kind));
+    }
+private:
+    HashSet<FrequentExitSite> m_frequentExitSites;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
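
For orientation, a minimal sketch of how a DFG phase might consult this profile before committing to a speculation. The helper name is invented for illustration; the ConcurrentJSLocker is assumed to already be held on the owning CodeBlock, and only hasExitSite() comes from the header above.

    // Returns true if speculating Int32 at this bytecode index has already failed often.
    static bool int32SpeculationKeepsFailing(const JSC::ConcurrentJSLocker& locker,
        JSC::DFG::ExitProfile& profile, unsigned bytecodeIndex)
    {
        // The default ExitFromAnything site subsumes both DFG and FTL exits
        // (see FrequentExitSite::subsumes()).
        return profile.hasExitSite(locker, bytecodeIndex, JSC::BadType);
    }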
diff --git a/bytecode/DOMJITAccessCasePatchpointParams.cpp b/bytecode/DOMJITAccessCasePatchpointParams.cpp
new file mode 100644
index 0000000..95e1401
--- /dev/null
+++ b/bytecode/DOMJITAccessCasePatchpointParams.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DOMJITAccessCasePatchpointParams.h"
+
+#include "LinkBuffer.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+template<typename JumpType, typename FunctionType, typename ResultType, typename... Arguments>
+class SlowPathCallGeneratorWithArguments : public DOMJITAccessCasePatchpointParams::SlowPathCallGenerator {
+public:
+    SlowPathCallGeneratorWithArguments(JumpType from, CCallHelpers::Label to, FunctionType function, ResultType result, std::tuple<Arguments...> arguments)
+        : m_from(from)
+        , m_to(to)
+        , m_function(function)
+        , m_result(result)
+        , m_arguments(arguments)
+    {
+    }
+
+    template<size_t... ArgumentsIndex>
+    CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
+    {
+        CCallHelpers::JumpList exceptions;
+        // We spill (1) the registers used by the IC and (2) the registers used by the DOMJIT::Patchpoint.
+        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersByPatchpoint);
+
+        jit.store32(
+            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+        jit.makeSpaceOnStackForCCall();
+
+        // FIXME: Currently, we do not check any ARM EABI / SH4 things here.
+        // But it is OK because a compile error happens when you pass JSValueRegs as an argument.
+        // https://bugs.webkit.org/show_bug.cgi?id=163099
+        jit.setupArgumentsWithExecState(std::get<ArgumentsIndex>(m_arguments)...);
+
+        CCallHelpers::Call operationCall = jit.call();
+        auto function = m_function;
+        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+            linkBuffer.link(operationCall, FunctionPtr(function));
+        });
+
+        jit.setupResults(m_result);
+        jit.reclaimSpaceOnStackForCCall();
+
+        CCallHelpers::Jump noException = jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+        state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+        exceptions.append(jit.jump());
+
+        noException.link(&jit);
+        RegisterSet dontRestore;
+        dontRestore.set(m_result);
+        state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+
+        return exceptions;
+    }
+
+    CCallHelpers::JumpList generate(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit) override
+    {
+        m_from.link(&jit);
+        CCallHelpers::JumpList exceptions = generateImpl(state, usedRegistersByPatchpoint, jit, std::make_index_sequence<std::tuple_size<std::tuple<Arguments...>>::value>());
+        jit.jump().linkTo(m_to, &jit);
+        return exceptions;
+    }
+
+protected:
+    JumpType m_from;
+    CCallHelpers::Label m_to;
+    FunctionType m_function;
+    ResultType m_result;
+    std::tuple<Arguments...> m_arguments;
+};
+
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) \
+    void DOMJITAccessCasePatchpointParams::addSlowPathCallImpl(CCallHelpers::JumpList from, CCallHelpers& jit, OperationType operation, ResultType result, std::tuple<__VA_ARGS__> args) \
+    { \
+        CCallHelpers::Label to = jit.label(); \
+        m_generators.append(std::make_unique<SlowPathCallGeneratorWithArguments<CCallHelpers::JumpList, OperationType, ResultType, __VA_ARGS__>>(from, to, operation, result, args)); \
+    } \
+
+DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+
+CCallHelpers::JumpList DOMJITAccessCasePatchpointParams::emitSlowPathCalls(AccessGenerationState& state, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers& jit)
+{
+    CCallHelpers::JumpList exceptions;
+    for (auto& generator : m_generators)
+        exceptions.append(generator->generate(state, usedRegistersByPatchpoint, jit));
+    return exceptions;
+}
+
+}
+
+#endif
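
Each row supplied by the DOMJIT_SLOW_PATH_CALLS X-macro becomes one out-of-line addSlowPathCallImpl overload through JSC_DEFINE_CALL_OPERATIONS above. Written out by hand for a generic row (OperationType, ResultType, Args...), the expansion is roughly the following; the concrete operation typedefs and argument lists live in the DOMJIT slow-path call list, not in this file:

    void DOMJITAccessCasePatchpointParams::addSlowPathCallImpl(
        CCallHelpers::JumpList from, CCallHelpers& jit,
        OperationType operation, ResultType result, std::tuple<Args...> args)
    {
        CCallHelpers::Label to = jit.label(); // where the generated slow path jumps back to
        m_generators.append(std::make_unique<SlowPathCallGeneratorWithArguments<
            CCallHelpers::JumpList, OperationType, ResultType, Args...>>(from, to, operation, result, args));
    }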
diff --git a/bytecode/DOMJITAccessCasePatchpointParams.h b/bytecode/DOMJITAccessCasePatchpointParams.h
new file mode 100644
index 0000000..8cf9751
--- /dev/null
+++ b/bytecode/DOMJITAccessCasePatchpointParams.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "DOMJITPatchpointParams.h"
+
+namespace JSC {
+
+struct AccessGenerationState;
+
+class DOMJITAccessCasePatchpointParams : public DOMJIT::PatchpointParams {
+public:
+    DOMJITAccessCasePatchpointParams(Vector<DOMJIT::Value>&& regs, Vector<GPRReg>&& gpScratch, Vector<FPRReg>&& fpScratch)
+        : DOMJIT::PatchpointParams(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch))
+    {
+    }
+
+    class SlowPathCallGenerator {
+    public:
+        virtual ~SlowPathCallGenerator() { }
+        virtual CCallHelpers::JumpList generate(AccessGenerationState&, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers&) = 0;
+    };
+
+    CCallHelpers::JumpList emitSlowPathCalls(AccessGenerationState&, const RegisterSet& usedRegistersByPatchpoint, CCallHelpers&);
+
+private:
+#define JSC_DEFINE_CALL_OPERATIONS(OperationType, ResultType, ...) void addSlowPathCallImpl(CCallHelpers::JumpList, CCallHelpers&, OperationType, ResultType, std::tuple<__VA_ARGS__> args) override;
+    DOMJIT_SLOW_PATH_CALLS(JSC_DEFINE_CALL_OPERATIONS)
+#undef JSC_DEFINE_CALL_OPERATIONS
+    Vector<std::unique_ptr<SlowPathCallGenerator>> m_generators;
+};
+
+}
+
+#endif
diff --git a/bytecode/DataFormat.cpp b/bytecode/DataFormat.cpp
new file mode 100644
index 0000000..8bd42e1
--- /dev/null
+++ b/bytecode/DataFormat.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DataFormat.h"
+
+#include <wtf/Assertions.h>
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::DataFormat dataFormat)
+{
+    out.print(dataFormatToString(dataFormat));
+}
+
+} // namespace WTF
diff --git a/bytecode/DataFormat.h b/bytecode/DataFormat.h
new file mode 100644
index 0000000..22c6492
--- /dev/null
+++ b/bytecode/DataFormat.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// === DataFormat ===
+//
+// This enum tracks the current representation in which a value is being held.
+// Values may be unboxed primitives (int32, double, or cell), or boxed as a JSValue.
+// For boxed values, we may know the type of boxing that has taken place.
+// (May also need bool, array, object, string types!)
+enum DataFormat {
+    DataFormatNone = 0,
+    DataFormatInt32 = 1,
+    DataFormatInt52 = 2, // Int52's are left-shifted by 16 by default.
+    DataFormatStrictInt52 = 3, // "Strict" Int52 means it's not shifted.
+    DataFormatDouble = 4,
+    DataFormatBoolean = 5,
+    DataFormatCell = 6,
+    DataFormatStorage = 7,
+    DataFormatJS = 8,
+    DataFormatJSInt32 = DataFormatJS | DataFormatInt32,
+    DataFormatJSDouble = DataFormatJS | DataFormatDouble,
+    DataFormatJSCell = DataFormatJS | DataFormatCell,
+    DataFormatJSBoolean = DataFormatJS | DataFormatBoolean,
+    
+    // Marker delimiting ordinary data formats and OSR-only data formats.
+    DataFormatOSRMarker = 32, 
+    
+    // Special data formats used only for OSR.
+    DataFormatDead = 33, // Implies jsUndefined().
+};
+
+inline const char* dataFormatToString(DataFormat dataFormat)
+{
+    switch (dataFormat) {
+    case DataFormatNone:
+        return "None";
+    case DataFormatInt32:
+        return "Int32";
+    case DataFormatInt52:
+        return "Int52";
+    case DataFormatStrictInt52:
+        return "StrictInt52";
+    case DataFormatDouble:
+        return "Double";
+    case DataFormatCell:
+        return "Cell";
+    case DataFormatBoolean:
+        return "Boolean";
+    case DataFormatStorage:
+        return "Storage";
+    case DataFormatJS:
+        return "JS";
+    case DataFormatJSInt32:
+        return "JSInt32";
+    case DataFormatJSDouble:
+        return "JSDouble";
+    case DataFormatJSCell:
+        return "JSCell";
+    case DataFormatJSBoolean:
+        return "JSBoolean";
+    case DataFormatDead:
+        return "Dead";
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return "Unknown";
+    }
+}
+
+inline bool isJSFormat(DataFormat format, DataFormat expectedFormat)
+{
+    ASSERT(expectedFormat & DataFormatJS);
+    return (format | DataFormatJS) == expectedFormat;
+}
+
+inline bool isJSInt32(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSInt32);
+}
+
+inline bool isJSDouble(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSDouble);
+}
+
+inline bool isJSCell(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSCell);
+}
+
+inline bool isJSBoolean(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSBoolean);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::DataFormat);
+
+} // namespace WTF
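
Since DataFormatJS is the single bit 8 and the unboxed formats occupy the values below it, isJSFormat() can test for "X or boxed X" with one OR. A quick check of the arithmetic, using only values taken from the enum above:

    static_assert((JSC::DataFormatInt32  | JSC::DataFormatJS) == JSC::DataFormatJSInt32,  "1 | 8 == 9");
    static_assert((JSC::DataFormatDouble | JSC::DataFormatJS) == JSC::DataFormatJSDouble, "4 | 8 == 12");
    // isJSInt32(DataFormatInt32) and isJSInt32(DataFormatJSInt32) are both true,
    // while isJSInt32(DataFormatJSDouble) is false: (12 | 8) == 12, not 9.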
diff --git a/bytecode/DeferredCompilationCallback.cpp b/bytecode/DeferredCompilationCallback.cpp
new file mode 100644
index 0000000..762387c
--- /dev/null
+++ b/bytecode/DeferredCompilationCallback.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DeferredCompilationCallback.h"
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+DeferredCompilationCallback::DeferredCompilationCallback() { }
+DeferredCompilationCallback::~DeferredCompilationCallback() { }
+
+void DeferredCompilationCallback::compilationDidComplete(CodeBlock*, CodeBlock*, CompilationResult result)
+{
+    dumpCompiledSourcesIfNeeded();
+
+    switch (result) {
+    case CompilationFailed:
+    case CompilationInvalidated:
+    case CompilationSuccessful:
+        break;
+    case CompilationDeferred:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
+Vector<DeferredSourceDump>& DeferredCompilationCallback::ensureDeferredSourceDump()
+{
+    if (!m_deferredSourceDump)
+        m_deferredSourceDump = std::make_unique<Vector<DeferredSourceDump>>();
+    return *m_deferredSourceDump;
+}
+
+void DeferredCompilationCallback::dumpCompiledSourcesIfNeeded()
+{
+    if (!m_deferredSourceDump)
+        return;
+
+    ASSERT(Options::dumpSourceAtDFGTime());
+    unsigned index = 0;
+    for (auto& info : *m_deferredSourceDump) {
+        dataLog("[", ++index, "] ");
+        info.dump();
+    }
+}
+
+} // JSC
+
diff --git a/bytecode/DeferredCompilationCallback.h b/bytecode/DeferredCompilationCallback.h
new file mode 100644
index 0000000..9257110
--- /dev/null
+++ b/bytecode/DeferredCompilationCallback.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CompilationResult.h"
+#include "DeferredSourceDump.h"
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+
+class DeferredCompilationCallback : public RefCounted<DeferredCompilationCallback> {
+protected:
+    DeferredCompilationCallback();
+
+public:
+    virtual ~DeferredCompilationCallback();
+
+    virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) = 0;
+    virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult);
+
+    Vector<DeferredSourceDump>& ensureDeferredSourceDump();
+
+private:
+    void dumpCompiledSourcesIfNeeded();
+
+    std::unique_ptr<Vector<DeferredSourceDump>> m_deferredSourceDump;
+};
+
+} // namespace JSC
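
A minimal sketch of a concrete callback; the class name and logging are invented for illustration. Only compilationDidBecomeReadyAsynchronously() is pure virtual, and overriders of compilationDidComplete() would normally chain to the base implementation so deferred source dumps still happen:

    class LoggingCompilationCallback final : public DeferredCompilationCallback {
    public:
        void compilationDidBecomeReadyAsynchronously(CodeBlock* codeBlock, CodeBlock*) override
        {
            dataLog("Ready to install: ", *codeBlock, "\n"); // illustrative only
        }

        void compilationDidComplete(CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result) override
        {
            // Keep the base behaviour: dump any deferred sources, assert on CompilationDeferred.
            DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result);
        }
    };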
diff --git a/bytecode/DeferredSourceDump.cpp b/bytecode/DeferredSourceDump.cpp
new file mode 100644
index 0000000..48079db
--- /dev/null
+++ b/bytecode/DeferredSourceDump.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DeferredSourceDump.h"
+
+#include "CodeBlock.h"
+#include "CodeBlockWithJITType.h"
+
+namespace JSC {
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock)
+    : m_codeBlock(codeBlock)
+    , m_rootCodeBlock(nullptr)
+    , m_rootJITType(JITCode::None)
+{
+}
+
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin)
+    : m_codeBlock(codeBlock)
+    , m_rootCodeBlock(rootCodeBlock)
+    , m_rootJITType(rootJITType)
+    , m_callerCodeOrigin(callerCodeOrigin)
+{
+}
+
+void DeferredSourceDump::dump()
+{
+    bool isInlinedFrame = !!m_rootCodeBlock;
+    if (isInlinedFrame)
+        dataLog("Inlined ");
+    else
+        dataLog("Compiled ");
+    dataLog(*m_codeBlock);
+
+    if (isInlinedFrame)
+        dataLog(" at ", CodeBlockWithJITType(m_rootCodeBlock, m_rootJITType), " ", m_callerCodeOrigin);
+
+    dataLog("\n'''");
+    m_codeBlock->dumpSource();
+    dataLog("'''\n");
+}
+
+} // namespace JSC
diff --git a/bytecode/DeferredSourceDump.h b/bytecode/DeferredSourceDump.h
new file mode 100644
index 0000000..6c9943d
--- /dev/null
+++ b/bytecode/DeferredSourceDump.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeOrigin.h"
+#include "JITCode.h"
+
+namespace JSC {
+
+class CodeBlock;
+
+class DeferredSourceDump {
+public:
+    DeferredSourceDump(CodeBlock*);
+    DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin);
+
+    void dump();
+
+private:
+    CodeBlock* m_codeBlock;
+    CodeBlock* m_rootCodeBlock;
+    JITCode::JITType m_rootJITType;
+    CodeOrigin m_callerCodeOrigin;
+};
+
+} // namespace JSC
diff --git a/bytecode/DirectEvalCodeCache.cpp b/bytecode/DirectEvalCodeCache.cpp
new file mode 100644
index 0000000..5bfef12
--- /dev/null
+++ b/bytecode/DirectEvalCodeCache.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "DirectEvalCodeCache.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void DirectEvalCodeCache::setSlow(ExecState* exec, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
+{
+    LockHolder locker(m_lock);
+    m_cacheMap.set(CacheKey(evalSource, callSiteIndex), WriteBarrier<DirectEvalExecutable>(exec->vm(), owner, evalExecutable));
+}
+
+void DirectEvalCodeCache::clear()
+{
+    LockHolder locker(m_lock);
+    m_cacheMap.clear();
+}
+
+void DirectEvalCodeCache::visitAggregate(SlotVisitor& visitor)
+{
+    LockHolder locker(m_lock);
+    EvalCacheMap::iterator end = m_cacheMap.end();
+    for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
+        visitor.append(ptr->value);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/DirectEvalCodeCache.h b/bytecode/DirectEvalCodeCache.h
new file mode 100644
index 0000000..e075357
--- /dev/null
+++ b/bytecode/DirectEvalCodeCache.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "DirectEvalExecutable.h"
+#include <wtf/HashMap.h>
+#include <wtf/RefPtr.h>
+#include <wtf/text/StringHash.h>
+
+namespace JSC {
+
+    class SlotVisitor;
+
+    class DirectEvalCodeCache {
+    public:
+        class CacheKey {
+        public:
+            CacheKey(const String& source, CallSiteIndex callSiteIndex)
+                : m_source(source.impl())
+                , m_callSiteIndex(callSiteIndex)
+            {
+            }
+
+            CacheKey(WTF::HashTableDeletedValueType)
+                : m_source(WTF::HashTableDeletedValue)
+            {
+            }
+
+            CacheKey() = default;
+
+            unsigned hash() const { return m_source->hash() ^ m_callSiteIndex.bits(); }
+
+            bool isEmptyValue() const { return !m_source; }
+
+            bool operator==(const CacheKey& other) const
+            {
+                return m_callSiteIndex == other.m_callSiteIndex && WTF::equal(m_source.get(), other.m_source.get());
+            }
+
+            bool isHashTableDeletedValue() const { return m_source.isHashTableDeletedValue(); }
+
+            struct Hash {
+                static unsigned hash(const CacheKey& key)
+                {
+                    return key.hash();
+                }
+                static bool equal(const CacheKey& lhs, const CacheKey& rhs)
+                {
+                    return lhs == rhs;
+                }
+                static const bool safeToCompareToEmptyOrDeleted = false;
+            };
+
+            typedef SimpleClassHashTraits<CacheKey> HashTraits;
+
+        private:
+            RefPtr<StringImpl> m_source;
+            CallSiteIndex m_callSiteIndex;
+        };
+
+        DirectEvalExecutable* tryGet(const String& evalSource, CallSiteIndex callSiteIndex)
+        {
+            return m_cacheMap.fastGet(CacheKey(evalSource, callSiteIndex)).get();
+        }
+        
+        void set(ExecState* exec, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
+        {
+            if (m_cacheMap.size() < maxCacheEntries)
+                setSlow(exec, owner, evalSource, callSiteIndex, evalExecutable);
+        }
+
+        bool isEmpty() const { return m_cacheMap.isEmpty(); }
+
+        void visitAggregate(SlotVisitor&);
+
+        void clear();
+
+    private:
+        static const int maxCacheEntries = 64;
+
+        void setSlow(ExecState*, JSCell* owner, const String& evalSource, CallSiteIndex, DirectEvalExecutable*);
+
+        typedef HashMap<CacheKey, WriteBarrier<DirectEvalExecutable>, CacheKey::Hash, CacheKey::HashTraits> EvalCacheMap;
+        EvalCacheMap m_cacheMap;
+        Lock m_lock;
+    };
+
+} // namespace JSC
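
A rough sketch of the intended lookup/insert pattern around a direct eval site; compileDirectEval() stands in for the real compilation path and is not part of this header:

    JSC::DirectEvalExecutable* cachedOrCompile(JSC::ExecState* exec, JSC::JSCell* owner,
        JSC::DirectEvalCodeCache& cache, const String& evalSource, JSC::CallSiteIndex callSiteIndex)
    {
        if (JSC::DirectEvalExecutable* executable = cache.tryGet(evalSource, callSiteIndex))
            return executable;
        JSC::DirectEvalExecutable* executable = compileDirectEval(exec, evalSource); // assumed helper
        if (executable)
            cache.set(exec, owner, evalSource, callSiteIndex, executable); // becomes a no-op once 64 entries are cached
        return executable;
    }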
diff --git a/bytecode/EvalCodeBlock.cpp b/bytecode/EvalCodeBlock.cpp
new file mode 100644
index 0000000..5232a0e
--- /dev/null
+++ b/bytecode/EvalCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "EvalCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo EvalCodeBlock::s_info = {
+    "EvalCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(EvalCodeBlock)
+};
+
+void EvalCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<EvalCodeBlock*>(cell)->~EvalCodeBlock();
+}
+
+} // namespace JSC
diff --git a/bytecode/EvalCodeBlock.h b/bytecode/EvalCodeBlock.h
new file mode 100644
index 0000000..e9e61ba
--- /dev/null
+++ b/bytecode/EvalCodeBlock.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+
+namespace JSC {
+
+class EvalCodeBlock : public GlobalCodeBlock {
+public:
+    typedef GlobalCodeBlock Base;
+    DECLARE_INFO;
+
+    static EvalCodeBlock* create(VM* vm, CopyParsedBlockTag, EvalCodeBlock& other)
+    {
+        EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+            EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static EvalCodeBlock* create(VM* vm, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
+    {
+        EvalCodeBlock* instance = new (NotNull, allocateCell<EvalCodeBlock>(vm->heap))
+            EvalCodeBlock(vm, vm->evalCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
+    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
+    
+private:
+    EvalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, EvalCodeBlock& other)
+        : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+        
+    EvalCodeBlock(VM* vm, Structure* structure, EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
+        : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
+    {
+    }
+    
+    static void destroy(JSCell*);
+
+private:
+    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
+};
+
+} // namespace JSC
diff --git a/bytecode/ExecutableInfo.h b/bytecode/ExecutableInfo.h
new file mode 100644
index 0000000..750900e
--- /dev/null
+++ b/bytecode/ExecutableInfo.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ParserModes.h"
+
+namespace JSC {
+    
+enum class DerivedContextType : uint8_t { None, DerivedConstructorContext, DerivedMethodContext };
+enum class EvalContextType    : uint8_t { None, FunctionEvalContext };
+
+// FIXME: These flags, ParserModes and propagation to XXXCodeBlocks should be reorganized.
+// https://bugs.webkit.org/show_bug.cgi?id=151547
+struct ExecutableInfo {
+    ExecutableInfo(bool usesEval, bool isStrictMode, bool isConstructor, bool isBuiltinFunction, ConstructorKind constructorKind, JSParserScriptMode scriptMode, SuperBinding superBinding, SourceParseMode parseMode, DerivedContextType derivedContextType, bool isArrowFunctionContext, bool isClassContext, EvalContextType evalContextType)
+        : m_usesEval(usesEval)
+        , m_isStrictMode(isStrictMode)
+        , m_isConstructor(isConstructor)
+        , m_isBuiltinFunction(isBuiltinFunction)
+        , m_constructorKind(static_cast<unsigned>(constructorKind))
+        , m_superBinding(static_cast<unsigned>(superBinding))
+        , m_scriptMode(static_cast<unsigned>(scriptMode))
+        , m_parseMode(parseMode)
+        , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+        , m_isArrowFunctionContext(isArrowFunctionContext)
+        , m_isClassContext(isClassContext)
+        , m_evalContextType(static_cast<unsigned>(evalContextType))
+    {
+        ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind));
+        ASSERT(m_superBinding == static_cast<unsigned>(superBinding));
+        ASSERT(m_scriptMode == static_cast<unsigned>(scriptMode));
+    }
+
+    bool usesEval() const { return m_usesEval; }
+    bool isStrictMode() const { return m_isStrictMode; }
+    bool isConstructor() const { return m_isConstructor; }
+    bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+    ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+    SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+    JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+    SourceParseMode parseMode() const { return m_parseMode; }
+    DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+    EvalContextType evalContextType() const { return static_cast<EvalContextType>(m_evalContextType); }
+    bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+    bool isClassContext() const { return m_isClassContext; }
+
+private:
+    unsigned m_usesEval : 1;
+    unsigned m_isStrictMode : 1;
+    unsigned m_isConstructor : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_constructorKind : 2;
+    unsigned m_superBinding : 1;
+    unsigned m_scriptMode: 1;
+    SourceParseMode m_parseMode;
+    unsigned m_derivedContextType : 2;
+    unsigned m_isArrowFunctionContext : 1;
+    unsigned m_isClassContext : 1;
+    unsigned m_evalContextType : 2;
+};
+
+} // namespace JSC
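
The ASSERTs in the constructor exist because several enums are squeezed into 1- and 2-bit fields, so a value that does not round-trip through the bitfield is silently truncated. A hypothetical illustration of what they catch:

    // m_constructorKind is a 2-bit field, so it can only hold 0..3.
    // If ConstructorKind ever gained an enumerator with value 4, then
    //     m_constructorKind = static_cast<unsigned>(constructorKind);   // stores 4 & 0b11 == 0
    // and ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind)) fires, because 0 != 4.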
diff --git a/bytecode/ExecutionCounter.cpp b/bytecode/ExecutionCounter.cpp
new file mode 100644
index 0000000..237c0e7
--- /dev/null
+++ b/bytecode/ExecutionCounter.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ExecutionCounter.h"
+
+#include "CodeBlock.h"
+#include "ExecutableAllocator.h"
+#include "JSCInlines.h"
+#include "VMInlines.h"
+#include <wtf/StringExtras.h>
+
+namespace JSC {
+
+template<CountingVariant countingVariant>
+ExecutionCounter<countingVariant>::ExecutionCounter()
+{
+    reset();
+}
+
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::forceSlowPathConcurrently()
+{
+    m_counter = 0;
+}
+
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock)
+{
+    if (hasCrossedThreshold(codeBlock))
+        return true;
+    
+    if (setThreshold(codeBlock))
+        return true;
+    
+    return false;
+}
+
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::setNewThreshold(int32_t threshold, CodeBlock* codeBlock)
+{
+    reset();
+    m_activeThreshold = threshold;
+    setThreshold(codeBlock);
+}
+
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::deferIndefinitely()
+{
+    m_totalCount = 0;
+    m_activeThreshold = std::numeric_limits<int32_t>::max();
+    m_counter = std::numeric_limits<int32_t>::min();
+}
+
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+    double multiplier =
+        ExecutableAllocator::memoryPressureMultiplier(
+            codeBlock->baselineAlternative()->predictedMachineCodeSize());
+#else
+    // This code path will probably not be taken, but if it is, we fake it.
+    double multiplier = 1.0;
+    UNUSED_PARAM(codeBlock);
+#endif
+    ASSERT(multiplier >= 1.0);
+    return multiplier * value;
+}
+
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock* codeBlock)
+{
+    double doubleResult = applyMemoryUsageHeuristics(value, codeBlock);
+    
+    ASSERT(doubleResult >= 0);
+    
+    if (doubleResult > std::numeric_limits<int32_t>::max())
+        return std::numeric_limits<int32_t>::max();
+    
+    return static_cast<int32_t>(doubleResult);
+}
+
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::hasCrossedThreshold(CodeBlock* codeBlock) const
+{
+    // This checks whether the current count has, within the tolerance described below, reached the threshold we were targeting.
+    // For example, if we are using half of available executable memory and have
+    // m_activeThreshold = 1000, applyMemoryUsageHeuristics(m_activeThreshold) will be
+    // 2000, but we will pretend as if the threshold was crossed if we reach 2000 -
+    // 1000 / 2, or 1500. The reasoning here is that we want to avoid thrashing. If
+    // this method returns false, then the JIT's threshold for when it will again call
+    // into the slow path (which will call this method a second time) will be set
+    // according to the difference between the current count and the target count
+    // according to *current* memory usage. But by the time we call into this again, we
+    // may have JIT'ed more code, and so the target count will increase slightly. This
+    // may lead to a repeating pattern where the target count is slightly incremented,
+    // the JIT immediately matches that increase, calls into the slow path again, and
+    // again the target count is slightly incremented. Instead of having this vicious
+    // cycle, we declare victory a bit early if the difference between the current
+    // total and our target according to memory heuristics is small. Our definition of
+    // small is arbitrarily picked to be half of the original threshold (i.e.
+    // m_activeThreshold).
+    
+    double modifiedThreshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
+    
+    double actualCount = static_cast<double>(m_totalCount) + m_counter;
+    double desiredCount = modifiedThreshold - static_cast<double>(
+        std::min(m_activeThreshold, maximumExecutionCountsBetweenCheckpoints())) / 2;
+    
+    bool result = actualCount >= desiredCount;
+    
+    CODEBLOCK_LOG_EVENT(codeBlock, "thresholdCheck", ("activeThreshold = ", m_activeThreshold, ", modifiedThreshold = ", modifiedThreshold, ", actualCount = ", actualCount, ", desiredCount = ", desiredCount));
+    
+    return result;
+}
+
+template<CountingVariant countingVariant>
+bool ExecutionCounter<countingVariant>::setThreshold(CodeBlock* codeBlock)
+{
+    if (m_activeThreshold == std::numeric_limits<int32_t>::max()) {
+        deferIndefinitely();
+        return false;
+    }
+        
+    // Compute the true total count.
+    double trueTotalCount = count();
+    
+    // Correct the threshold for current memory usage.
+    double threshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock);
+        
+    // Threshold must be non-negative and not NaN.
+    ASSERT(threshold >= 0);
+        
+    // Adjust the threshold according to the number of executions we have already
+    // seen. This shouldn't go negative, but it might, because of round-off errors.
+    threshold -= trueTotalCount;
+        
+    if (threshold <= 0) {
+        m_counter = 0;
+        m_totalCount = trueTotalCount;
+        return true;
+    }
+
+    threshold = clippedThreshold(codeBlock->globalObject(), threshold);
+    
+    m_counter = static_cast<int32_t>(-threshold);
+        
+    m_totalCount = trueTotalCount + threshold;
+        
+    return false;
+}
+
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::reset()
+{
+    m_counter = 0;
+    m_totalCount = 0;
+    m_activeThreshold = 0;
+}
+
+template<CountingVariant countingVariant>
+void ExecutionCounter<countingVariant>::dump(PrintStream& out) const
+{
+    out.printf("%lf/%lf, %d", count(), static_cast<double>(m_activeThreshold), m_counter);
+}
+
+template class ExecutionCounter<CountingForBaseline>;
+template class ExecutionCounter<CountingForUpperTiers>;
+
+} // namespace JSC
+
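To make the anti-thrashing heuristic in hasCrossedThreshold() concrete, here is the arithmetic from its own comment worked out, assuming the memory-pressure multiplier is 2 and the checkpoint maximum is at least m_activeThreshold:

    // m_activeThreshold = 1000
    // modifiedThreshold = applyMemoryUsageHeuristics(1000, codeBlock) = 2000
    // desiredCount      = 2000 - std::min(1000, maximumExecutionCountsBetweenCheckpoints()) / 2
    //                   = 2000 - 500 = 1500
    // So the threshold is treated as crossed once count() reaches 1500 rather than 2000,
    // which keeps small upward drifts of the memory-adjusted target from repeatedly
    // bouncing execution back into the slow path.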
diff --git a/bytecode/ExecutionCounter.h b/bytecode/ExecutionCounter.h
new file mode 100644
index 0000000..f78a912
--- /dev/null
+++ b/bytecode/ExecutionCounter.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSGlobalObject.h"
+#include "Options.h"
+#include <wtf/SimpleStats.h>
+
+namespace JSC {
+
+class CodeBlock;
+
+enum CountingVariant {
+    CountingForBaseline,
+    CountingForUpperTiers
+};
+
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+inline int32_t formattedTotalExecutionCount(float value)
+{
+    union {
+        int32_t i;
+        float f;
+    } u;
+    u.f = value;
+    return u.i;
+}
+    
+template<CountingVariant countingVariant>
+class ExecutionCounter {
+public:
+    ExecutionCounter();
+    void forceSlowPathConcurrently(); // If you use this, checkIfThresholdCrossedAndSet() may still return false.
+    bool checkIfThresholdCrossedAndSet(CodeBlock*);
+    void setNewThreshold(int32_t threshold, CodeBlock*);
+    void deferIndefinitely();
+    double count() const { return static_cast<double>(m_totalCount) + m_counter; }
+    void dump(PrintStream&) const;
+    
+    static int32_t maximumExecutionCountsBetweenCheckpoints()
+    {
+        switch (countingVariant) {
+        case CountingForBaseline:
+            return Options::maximumExecutionCountsBetweenCheckpointsForBaseline();
+        case CountingForUpperTiers:
+            return Options::maximumExecutionCountsBetweenCheckpointsForUpperTiers();
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return 0;
+        }
+    }
+    
+    template<typename T>
+    static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
+    {
+        int32_t maxThreshold;
+        if (Options::randomizeExecutionCountsBetweenCheckpoints())
+            maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
+        else
+            maxThreshold = maximumExecutionCountsBetweenCheckpoints();
+        if (threshold > maxThreshold)
+            threshold = maxThreshold;
+        return threshold;
+    }
+
+private:
+    bool hasCrossedThreshold(CodeBlock*) const;
+    bool setThreshold(CodeBlock*);
+    void reset();
+
+public:
+    // NB. These are intentionally public because they will be modified from machine code.
+    
+    // This counter is incremented by the JIT or LLInt. It starts out negative and is
+    // counted up until it becomes non-negative. At the start of a counting period,
+    // the threshold we wish to reach is m_totalCount + m_counter, in the sense that
+    // we will add X to m_totalCount and subtract X from m_counter.
+    int32_t m_counter;
+
+    // Counts the total number of executions we have seen plus the ones we've set a
+    // threshold for in m_counter. Because m_counter's threshold is negative, the
+    // total number of actual executions can always be computed as m_totalCount +
+    // m_counter.
+    float m_totalCount;
+
+    // This is the threshold we were originally targeting, without any correction for
+    // the memory usage heuristics.
+    int32_t m_activeThreshold;
+};
+
+typedef ExecutionCounter<CountingForBaseline> BaselineExecutionCounter;
+typedef ExecutionCounter<CountingForUpperTiers> UpperTierExecutionCounter;
+
+} // namespace JSC
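
A small worked example of how the three public fields relate, assuming a memory-pressure multiplier of 1 and a checkpoint maximum larger than the threshold:

    // After setNewThreshold(100, codeBlock) on a fresh BaselineExecutionCounter:
    //   m_activeThreshold == 100
    //   m_counter         == -100   (machine code counts this up towards zero)
    //   m_totalCount      == 100
    //   count() == m_totalCount + m_counter == 0 executions observed so far.
    // After 40 executions: m_counter == -60 and count() == 40. The JIT/LLInt only
    // re-enters the slow path (and re-runs checkIfThresholdCrossedAndSet()) once
    // m_counter has counted up to a non-negative value.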
diff --git a/bytecode/ExitKind.cpp b/bytecode/ExitKind.cpp
new file mode 100644
index 0000000..f1ea76d
--- /dev/null
+++ b/bytecode/ExitKind.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ExitKind.h"
+
+#include <wtf/Assertions.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+const char* exitKindToString(ExitKind kind)
+{
+    switch (kind) {
+    case ExitKindUnset:
+        return "Unset";
+    case BadType:
+        return "BadType";
+    case BadCell:
+        return "BadCell";
+    case BadIdent:
+        return "BadIdent";
+    case BadExecutable:
+        return "BadExecutable";
+    case BadCache:
+        return "BadCache";
+    case BadConstantCache:
+        return "BadConstantCache";
+    case BadIndexingType:
+        return "BadIndexingType";
+    case BadTypeInfoFlags:
+        return "BadTypeInfoFlags";
+    case Overflow:
+        return "Overflow";
+    case NegativeZero:
+        return "NegativeZero";
+    case Int52Overflow:
+        return "Int52Overflow";
+    case StoreToHole:
+        return "StoreToHole";
+    case LoadFromHole:
+        return "LoadFromHole";
+    case OutOfBounds:
+        return "OutOfBounds";
+    case InadequateCoverage:
+        return "InadequateCoverage";
+    case ArgumentsEscaped:
+        return "ArgumentsEscaped";
+    case ExoticObjectMode:
+        return "ExoticObjectMode";
+    case NotStringObject:
+        return "NotStringObject";
+    case VarargsOverflow:
+        return "VarargsOverflow";
+    case TDZFailure:
+        return "TDZFailure";
+    case HoistingFailed:
+        return "HoistingFailed";
+    case Uncountable:
+        return "Uncountable";
+    case UncountableInvalidation:
+        return "UncountableInvalidation";
+    case WatchdogTimerFired:
+        return "WatchdogTimerFired";
+    case DebuggerEvent:
+        return "DebuggerEvent";
+    case ExceptionCheck:
+        return "ExceptionCheck";
+    case GenericUnwind:
+        return "GenericUnwind";
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+    return "Unknown";
+}
+
+bool exitKindMayJettison(ExitKind kind)
+{
+    switch (kind) {
+    case ExceptionCheck:
+    case GenericUnwind:
+        return false;
+    default:
+        return true;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+    return false;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::ExitKind kind)
+{
+    out.print(exitKindToString(kind));
+}
+
+} // namespace WTF
+
diff --git a/bytecode/ExitKind.h b/bytecode/ExitKind.h
new file mode 100644
index 0000000..a6c2e0e
--- /dev/null
+++ b/bytecode/ExitKind.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum ExitKind : uint8_t {
+    ExitKindUnset,
+    BadType, // We exited because a type prediction was wrong.
+    BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks.
+    BadIdent, // We exited because we made an incorrect assumption about what identifier we would see. Usually used for cached Id check in get_by_val.
+    BadExecutable, // We exited because we made an incorrect assumption about what executable we would see.
+    BadCache, // We exited because an inline cache was wrong.
+    BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
+    BadIndexingType, // We exited because an indexing type was wrong.
+    BadTypeInfoFlags, // We exited because we made an incorrect assumption about what TypeInfo flags we would see.
+    Overflow, // We exited because of overflow.
+    NegativeZero, // We exited because we encountered negative zero.
+    Int52Overflow, // We exited because of an Int52 overflow.
+    StoreToHole, // We had a store to a hole.
+    LoadFromHole, // We had a load from a hole.
+    OutOfBounds, // We had an out-of-bounds access to an array.
+    InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
+    ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to.
+    ExoticObjectMode, // We exited because some exotic object that we were accessing was in an exotic mode (like Arguments with slow arguments).
+    NotStringObject, // We exited because we shouldn't have attempted to optimize string object access.
+    VarargsOverflow, // We exited because a varargs call passed more arguments than we expected.
+    TDZFailure, // We exited because we were in the TDZ and accessed the variable.
+    HoistingFailed, // Something that was hoisted exited. So, assume that hoisting is a bad idea.
+    Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
+    UncountableInvalidation, // We exited because the code block was invalidated; this means that we've already counted the reasons why the code block was invalidated.
+    WatchdogTimerFired, // We exited because we need to service the watchdog timer.
+    DebuggerEvent, // We exited because we need to service the debugger.
+    ExceptionCheck, // We exited because a direct exception check showed that we threw an exception from a C call.
+    GenericUnwind, // We exited because we arrived at this OSR exit from genericUnwind.
+};
+
+const char* exitKindToString(ExitKind);
+bool exitKindMayJettison(ExitKind);
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::ExitKind);
+
+} // namespace WTF
diff --git a/bytecode/ExitingJITType.cpp b/bytecode/ExitingJITType.cpp
new file mode 100644
index 0000000..aa8f120
--- /dev/null
+++ b/bytecode/ExitingJITType.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ExitingJITType.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ExitingJITType type)
+{
+    switch (type) {
+    case ExitFromAnything:
+        out.print("FromAnything");
+        return;
+    case ExitFromDFG:
+        out.print("FromDFG");
+        return;
+    case ExitFromFTL:
+        out.print("FromFTL");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/bytecode/ExitingJITType.h b/bytecode/ExitingJITType.h
new file mode 100644
index 0000000..dfbfee4
--- /dev/null
+++ b/bytecode/ExitingJITType.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JITCode.h"
+
+namespace JSC {
+
+enum ExitingJITType : uint8_t {
+    ExitFromAnything,
+    ExitFromDFG,
+    ExitFromFTL
+};
+
+inline ExitingJITType exitingJITTypeFor(JITCode::JITType type)
+{
+    switch (type) {
+    case JITCode::DFGJIT:
+        return ExitFromDFG;
+    case JITCode::FTLJIT:
+        return ExitFromFTL;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return ExitFromAnything;
+    }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::ExitingJITType);
+
+} // namespace WTF
diff --git a/bytecode/ExpressionRangeInfo.h b/bytecode/ExpressionRangeInfo.h
new file mode 100644
index 0000000..8f83527
--- /dev/null
+++ b/bytecode/ExpressionRangeInfo.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+struct ExpressionRangeInfo {
+    // Line and column values are encoded in 1 of 3 modes depending on the size
+    // of their values. These modes are:
+    //
+    //   1. FatLine: 22-bit line, 8-bit column.
+    //   2. FatColumn: 8-bit line, 22-bit column.
+    //   3. FatLineAndColumn: 32-bit line, 32-bit column.
+    //
+    // For the first 2 modes, the line and column will be encoded in the 30-bit
+    // position field in the ExpressionRangeInfo. For the FatLineAndColumn mode,
+    // the position field will hold an index into a FatPosition vector which
+    // holds the FatPosition records with the full 32-bit line and column values.
+
+    enum {
+        FatLineMode,
+        FatColumnMode,
+        FatLineAndColumnMode
+    };
+
+    struct FatPosition {
+        uint32_t line;
+        uint32_t column;
+    };
+
+    enum {
+        FatLineModeLineShift = 8,
+        FatLineModeLineMask = (1 << 22) - 1,
+        FatLineModeColumnMask = (1 << 8) - 1,
+        FatColumnModeLineShift = 22,
+        FatColumnModeLineMask = (1 << 8) - 1,
+        FatColumnModeColumnMask = (1 << 22) - 1
+    };
+
+    enum {
+        MaxOffset = (1 << 7) - 1, 
+        MaxDivot = (1 << 25) - 1,
+        MaxFatLineModeLine = (1 << 22) - 1,
+        MaxFatLineModeColumn = (1 << 8) - 1,
+        MaxFatColumnModeLine = (1 << 8) - 1,
+        MaxFatColumnModeColumn = (1 << 22) - 1
+    };
+
+    void encodeFatLineMode(unsigned line, unsigned column)
+    {
+        ASSERT(line <= MaxFatLineModeLine);
+        ASSERT(column <= MaxFatLineModeColumn);
+        position = ((line & FatLineModeLineMask) << FatLineModeLineShift | (column & FatLineModeColumnMask));
+    }
+
+    void encodeFatColumnMode(unsigned line, unsigned column)
+    {
+        ASSERT(line <= MaxFatColumnModeLine);
+        ASSERT(column <= MaxFatColumnModeColumn);
+        position = ((line & FatColumnModeLineMask) << FatColumnModeLineShift | (column & FatColumnModeColumnMask));
+    }
+
+    void decodeFatLineMode(unsigned& line, unsigned& column) const
+    {
+        line = (position >> FatLineModeLineShift) & FatLineModeLineMask;
+        column = position & FatLineModeColumnMask;
+    }
+
+    void decodeFatColumnMode(unsigned& line, unsigned& column) const
+    {
+        line = (position >> FatColumnModeLineShift) & FatColumnModeLineMask;
+        column = position & FatColumnModeColumnMask;
+    }
+
+    uint32_t instructionOffset : 25;
+    uint32_t startOffset : 7;
+    uint32_t divotPoint : 25;
+    uint32_t endOffset : 7;
+    uint32_t mode : 2;
+    uint32_t position : 30;
+};
+
+} // namespace JSC
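
For clarity, here is a small standalone round-trip of the FatLine packing described in the comment block above (a 22-bit line over an 8-bit column inside the 30-bit position field). It is illustrative only and not part of the patch.

// Round-trip of the FatLine encoding layout documented above; not part of the patch.
#include <cassert>
#include <cstdint>

int main()
{
    const unsigned lineShift = 8;              // FatLineModeLineShift
    const unsigned lineMask = (1u << 22) - 1;  // FatLineModeLineMask
    const unsigned columnMask = (1u << 8) - 1; // FatLineModeColumnMask

    unsigned line = 12345, column = 67;
    uint32_t position = ((line & lineMask) << lineShift) | (column & columnMask);

    // Decoding reverses the packing exactly as decodeFatLineMode() does.
    assert(((position >> lineShift) & lineMask) == line);
    assert((position & columnMask) == column);
    return 0;
}
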
diff --git a/bytecode/FullBytecodeLiveness.h b/bytecode/FullBytecodeLiveness.h
new file mode 100644
index 0000000..073ce27
--- /dev/null
+++ b/bytecode/FullBytecodeLiveness.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/FastBitVector.h>
+
+namespace JSC {
+
+class BytecodeLivenessAnalysis;
+
+typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap;
+
+class FullBytecodeLiveness {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    const FastBitVector& getLiveness(unsigned bytecodeIndex) const
+    {
+        return m_map[bytecodeIndex];
+    }
+    
+    bool operandIsLive(int operand, unsigned bytecodeIndex) const
+    {
+        return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand);
+    }
+    
+private:
+    friend class BytecodeLivenessAnalysis;
+    
+    Vector<FastBitVector, 0, UnsafeVectorOverflow> m_map;
+};
+
+} // namespace JSC
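
As a rough sketch of how this map is meant to be consulted, liveness of an operand at a bytecode index is the disjunction of "always live" and the per-index bit. The stand-ins below are hypothetical (the real operandIsAlwaysLive() and FastBitVector live elsewhere in JSC); this is not part of the patch.

// Hypothetical stand-in for the per-bytecode-index liveness lookup above; not part of the patch.
#include <cstdio>
#include <vector>

// Placeholder policy: the real operandIsAlwaysLive() is defined elsewhere in JSC.
static bool operandIsAlwaysLive(int operand) { return operand < 0; }

static bool operandIsLive(const std::vector<std::vector<bool>>& map, int operand, unsigned bytecodeIndex)
{
    return operandIsAlwaysLive(operand) || map[bytecodeIndex][operand];
}

int main()
{
    std::vector<std::vector<bool>> map = { { true, false }, { false, true } };
    std::printf("%d %d\n", operandIsLive(map, 1, 0), operandIsLive(map, 1, 1)); // prints "0 1"
}
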
diff --git a/bytecode/FunctionCodeBlock.cpp b/bytecode/FunctionCodeBlock.cpp
new file mode 100644
index 0000000..56eadc6
--- /dev/null
+++ b/bytecode/FunctionCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FunctionCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo FunctionCodeBlock::s_info = {
+    "FunctionCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(FunctionCodeBlock)
+};
+
+void FunctionCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<FunctionCodeBlock*>(cell)->~FunctionCodeBlock();
+}
+
+} // namespace JSC
diff --git a/bytecode/FunctionCodeBlock.h b/bytecode/FunctionCodeBlock.h
new file mode 100644
index 0000000..5b418b1
--- /dev/null
+++ b/bytecode/FunctionCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+namespace JSC {
+
+class FunctionCodeBlock : public CodeBlock {
+public:
+    typedef CodeBlock Base;
+    DECLARE_INFO;
+
+    static FunctionCodeBlock* create(VM* vm, CopyParsedBlockTag, FunctionCodeBlock& other)
+    {
+        FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+            FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static FunctionCodeBlock* create(VM* vm, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+        PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+    {
+        FunctionCodeBlock* instance = new (NotNull, allocateCell<FunctionCodeBlock>(vm->heap))
+            FunctionCodeBlock(vm, vm->functionCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+private:
+    FunctionCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, FunctionCodeBlock& other)
+        : CodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    FunctionCodeBlock(VM* vm, Structure* structure, FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope,
+        PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+        : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
+    {
+    }
+    
+    static void destroy(JSCell*);
+};
+
+} // namespace JSC
diff --git a/bytecode/GetByIdStatus.cpp b/bytecode/GetByIdStatus.cpp
new file mode 100644
index 0000000..a6c458c
--- /dev/null
+++ b/bytecode/GetByIdStatus.cpp
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "GetByIdStatus.h"
+
+#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "JSCInlines.h"
+#include "JSScope.h"
+#include "LLIntData.h"
+#include "LowLevelInterpreter.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+bool GetByIdStatus::appendVariant(const GetByIdVariant& variant)
+{
+    // Attempt to merge this variant with an already existing variant.
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+    
+    // Make sure there is no overlap. We should have pruned out opportunities for
+    // overlap but it's possible that an inline cache got into a weird state. We are
+    // defensive and bail if we detect crazy.
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].structureSet().overlaps(variant.structureSet()))
+            return false;
+    }
+    
+    m_variants.append(variant);
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool GetByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+}
+#endif
+
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
+{
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(uid);
+
+    VM& vm = *profiledBlock->vm();
+    
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+    Opcode opcode = instruction[0].u.opcode;
+
+    ASSERT(opcode == LLInt::getOpcode(op_get_array_length) || opcode == LLInt::getOpcode(op_try_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_proto_load) || opcode == LLInt::getOpcode(op_get_by_id) || opcode == LLInt::getOpcode(op_get_by_id_unset));
+
+    // FIXME: We should not just bail if we see a try_get_by_id or a get_by_id_proto_load.
+    // https://bugs.webkit.org/show_bug.cgi?id=158039
+    if (opcode != LLInt::getOpcode(op_get_by_id))
+        return GetByIdStatus(NoInformation, false);
+
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
+        return GetByIdStatus(NoInformation, false);
+
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+    if (structure->takesSlowPathInDFGForImpureProperty())
+        return GetByIdStatus(NoInformation, false);
+
+    unsigned attributes;
+    PropertyOffset offset = structure->getConcurrently(uid, attributes);
+    if (!isValidOffset(offset))
+        return GetByIdStatus(NoInformation, false);
+    if (attributes & CustomAccessor)
+        return GetByIdStatus(NoInformation, false);
+    
+    return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset));
+}
+
+GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
+{
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
+
+    GetByIdStatus result;
+
+#if ENABLE(DFG_JIT)
+    result = computeForStubInfoWithoutExitSiteFeedback(
+        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+    
+    if (!result.takesSlowPath()
+        && hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
+#else
+    UNUSED_PARAM(map);
+#endif
+
+    if (!result)
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+    
+    return result;
+}
+
+#if ENABLE(DFG_JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+    GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));
+
+    if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+        return GetByIdStatus(result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
+    return result;
+}
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(JIT)
+GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid,
+    CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return GetByIdStatus(NoInformation);
+
+    PolymorphicAccess* list = 0;
+    State slowPathState = TakesSlowPath;
+    if (stubInfo->cacheType == CacheType::Stub) {
+        list = stubInfo->u.stub;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
+        }
+    }
+    
+    if (stubInfo->tookSlowPath)
+        return GetByIdStatus(slowPathState);
+    
+    // Finally figure out if we can derive an access strategy.
+    GetByIdStatus result;
+    result.m_state = Simple;
+    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        return GetByIdStatus(NoInformation);
+        
+    case CacheType::GetByIdSelf: {
+        Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
+        if (structure->takesSlowPathInDFGForImpureProperty())
+            return GetByIdStatus(slowPathState, true);
+        unsigned attributes;
+        GetByIdVariant variant;
+        variant.m_offset = structure->getConcurrently(uid, attributes);
+        if (!isValidOffset(variant.m_offset))
+            return GetByIdStatus(slowPathState, true);
+        if (attributes & CustomAccessor)
+            return GetByIdStatus(slowPathState, true);
+        
+        variant.m_structureSet.add(structure);
+        bool didAppend = result.appendVariant(variant);
+        ASSERT_UNUSED(didAppend, didAppend);
+        return result;
+    }
+        
+    case CacheType::Stub: {
+        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
+            const AccessCase& access = list->at(listIndex);
+            if (access.viaProxy())
+                return GetByIdStatus(slowPathState, true);
+            
+            Structure* structure = access.structure();
+            if (!structure) {
+                // The null structure cases arise due to array.length and string.length. We have no way
+                // of creating a GetByIdVariant for those, and we don't really have to since the DFG
+                // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
+                // shouldn't have to use value profiling to discover something that the AccessCase
+                // could have told us. But, it works well enough. So, our only concern here is to not
+                // crash on null structure.
+                return GetByIdStatus(slowPathState, true);
+            }
+            
+            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                structure, access.conditionSet(), uid);
+             
+            switch (complexGetStatus.kind()) {
+            case ComplexGetStatus::ShouldSkip:
+                continue;
+                 
+            case ComplexGetStatus::TakesSlowPath:
+                return GetByIdStatus(slowPathState, true);
+                 
+            case ComplexGetStatus::Inlineable: {
+                std::unique_ptr<CallLinkStatus> callLinkStatus;
+                JSFunction* intrinsicFunction = nullptr;
+                DOMJIT::GetterSetter* domJIT = nullptr;
+
+                switch (access.type()) {
+                case AccessCase::Load:
+                case AccessCase::GetGetter:
+                case AccessCase::Miss: {
+                    break;
+                }
+                case AccessCase::IntrinsicGetter: {
+                    intrinsicFunction = access.intrinsicFunction();
+                    break;
+                }
+                case AccessCase::Getter: {
+                    callLinkStatus = std::make_unique<CallLinkStatus>();
+                    if (CallLinkInfo* callLinkInfo = access.callLinkInfo()) {
+                        *callLinkStatus = CallLinkStatus::computeFor(
+                            locker, profiledBlock, *callLinkInfo, callExitSiteData);
+                    }
+                    break;
+                }
+                case AccessCase::CustomAccessorGetter: {
+                    domJIT = access.domJIT();
+                    if (!domJIT)
+                        return GetByIdStatus(slowPathState, true);
+                    result.m_state = Custom;
+                    break;
+                }
+                default: {
+                    // FIXME: It would be totally sweet to support more of these at some point in the
+                    // future. https://bugs.webkit.org/show_bug.cgi?id=133052
+                    return GetByIdStatus(slowPathState, true);
+                } }
+
+                ASSERT((AccessCase::Miss == access.type()) == (access.offset() == invalidOffset));
+                GetByIdVariant variant(
+                    StructureSet(structure), complexGetStatus.offset(),
+                    complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
+                    intrinsicFunction,
+                    domJIT);
+
+                if (!result.appendVariant(variant))
+                    return GetByIdStatus(slowPathState, true);
+
+                if (domJIT) {
+                    // Give up when custom accesses are not merged into one.
+                    if (result.numVariants() != 1)
+                        return GetByIdStatus(slowPathState, true);
+                } else {
+                    // Give up when custom access and simple access are mixed.
+                    if (result.m_state == Custom)
+                        return GetByIdStatus(slowPathState, true);
+                }
+                break;
+            } }
+        }
+        
+        return result;
+    }
+        
+    default:
+        return GetByIdStatus(slowPathState, true);
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+    return GetByIdStatus();
+}
+#endif // ENABLE(JIT)
+
+GetByIdStatus GetByIdStatus::computeFor(
+    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
+    StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(profiledBlock->m_lock);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, profiledBlock, codeOrigin.bytecodeIndex);
+        }
+        
+        GetByIdStatus result;
+        {
+            ConcurrentJSLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfoWithoutExitSiteFeedback(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+
+        if (result.takesSlowPath())
+            return result;
+    
+        {
+            ConcurrentJSLocker locker(profiledBlock->m_lock);
+            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
+                return GetByIdStatus(TakesSlowPath, true);
+        }
+        
+        if (result.isSet())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
+
+    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
+
+GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
+{
+    // For now we only handle the super simple self access case. We could handle the
+    // prototype case in the future.
+    
+    if (set.isEmpty())
+        return GetByIdStatus();
+
+    if (parseIndex(*uid))
+        return GetByIdStatus(TakesSlowPath);
+    
+    GetByIdStatus result;
+    result.m_state = Simple;
+    result.m_wasSeenInJIT = false;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+            return GetByIdStatus(TakesSlowPath);
+        
+        if (!structure->propertyAccessesAreCacheable())
+            return GetByIdStatus(TakesSlowPath);
+        
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (!isValidOffset(offset))
+            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
+        if (attributes & Accessor)
+            return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+        if (attributes & CustomAccessor)
+            return GetByIdStatus(TakesSlowPath);
+        
+        if (!result.appendVariant(GetByIdVariant(structure, offset)))
+            return GetByIdStatus(TakesSlowPath);
+    }
+    
+    return result;
+}
+
+bool GetByIdStatus::makesCalls() const
+{
+    switch (m_state) {
+    case NoInformation:
+    case TakesSlowPath:
+    case Custom:
+        return false;
+    case Simple:
+        for (unsigned i = m_variants.size(); i--;) {
+            if (m_variants[i].callLinkStatus())
+                return true;
+        }
+        return false;
+    case MakesCalls:
+        return true;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+
+    return false;
+}
+
+void GetByIdStatus::filter(const StructureSet& set)
+{
+    if (m_state != Simple)
+        return;
+    
+    // FIXME: We could also filter the variants themselves.
+    
+    m_variants.removeAllMatching(
+        [&] (GetByIdVariant& variant) -> bool {
+            return !variant.structureSet().overlaps(set);
+        });
+    
+    if (m_variants.isEmpty())
+        m_state = NoInformation;
+}
+
+void GetByIdStatus::dump(PrintStream& out) const
+{
+    out.print("(");
+    switch (m_state) {
+    case NoInformation:
+        out.print("NoInformation");
+        break;
+    case Simple:
+        out.print("Simple");
+        break;
+    case Custom:
+        out.print("Custom");
+        break;
+    case TakesSlowPath:
+        out.print("TakesSlowPath");
+        break;
+    case MakesCalls:
+        out.print("MakesCalls");
+        break;
+    }
+    out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")");
+}
+
+} // namespace JSC
+
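
The appendVariant() merge-or-bail policy above (merge a new variant into a compatible existing one, otherwise refuse anything whose structure set overlaps an already recorded variant) can be shown with a toy model. The real predicate also checks condition sets, call link status and DOMJIT state, so this standalone sketch is illustrative only and not part of the patch.

// Toy model of the appendVariant() merge-or-bail policy above; not part of the patch.
#include <algorithm>
#include <cstdio>
#include <vector>

struct ToyVariant {
    std::vector<int> structures; // stand-in for a StructureSet
    int offset;

    bool overlaps(const ToyVariant& other) const
    {
        for (int s : structures) {
            if (std::find(other.structures.begin(), other.structures.end(), s) != other.structures.end())
                return true;
        }
        return false;
    }
    bool attemptToMerge(const ToyVariant& other)
    {
        if (offset != other.offset)
            return false;
        structures.insert(structures.end(), other.structures.begin(), other.structures.end());
        return true;
    }
};

static bool appendVariant(std::vector<ToyVariant>& variants, const ToyVariant& variant)
{
    for (ToyVariant& existing : variants) {
        if (existing.attemptToMerge(variant))
            return true;
    }
    for (const ToyVariant& existing : variants) {
        if (existing.overlaps(variant))
            return false; // defensively bail on overlapping, unmergeable variants
    }
    variants.push_back(variant);
    return true;
}

int main()
{
    std::vector<ToyVariant> variants;
    appendVariant(variants, { { 1 }, 8 });
    appendVariant(variants, { { 2 }, 8 });            // merges: same offset
    bool ok = appendVariant(variants, { { 1 }, 16 }); // rejected: overlaps, different offset
    std::printf("%zu variant(s), last append %s\n", variants.size(), ok ? "ok" : "rejected");
}
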
diff --git a/bytecode/GetByIdStatus.h b/bytecode/GetByIdStatus.h
new file mode 100644
index 0000000..e8aad89
--- /dev/null
+++ b/bytecode/GetByIdStatus.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallLinkStatus.h"
+#include "CodeOrigin.h"
+#include "ConcurrentJSLock.h"
+#include "ExitingJITType.h"
+#include "GetByIdVariant.h"
+
+namespace JSC {
+
+class CodeBlock;
+class StructureStubInfo;
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
+
+class GetByIdStatus {
+public:
+    enum State {
+        NoInformation,  // It's uncached so we have no information.
+        Simple,         // It's cached for a simple access to a known object property with
+                        // a possible structure chain and a possible specific value.
+        Custom,         // It's cached for a custom accessor with a possible structure chain.
+        TakesSlowPath,  // It's known to often take slow path.
+        MakesCalls      // It's known to take paths that make calls.
+    };
+
+    GetByIdStatus()
+        : m_state(NoInformation)
+    {
+    }
+    
+    explicit GetByIdStatus(State state)
+        : m_state(state)
+    {
+        ASSERT(state == NoInformation || state == TakesSlowPath || state == MakesCalls);
+    }
+    
+    GetByIdStatus(
+        State state, bool wasSeenInJIT, const GetByIdVariant& variant = GetByIdVariant())
+        : m_state(state)
+        , m_wasSeenInJIT(wasSeenInJIT)
+    {
+        ASSERT((state == Simple || state == Custom) == variant.isSet());
+        m_variants.append(variant);
+    }
+    
+    static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid);
+    
+    static GetByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(DFG_JIT)
+    static GetByIdStatus computeForStubInfo(const ConcurrentJSLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
+
+    State state() const { return m_state; }
+    
+    bool isSet() const { return m_state != NoInformation; }
+    bool operator!() const { return !isSet(); }
+    bool isSimple() const { return m_state == Simple; }
+    bool isCustom() const { return m_state == Custom; }
+
+    size_t numVariants() const { return m_variants.size(); }
+    const Vector<GetByIdVariant, 1>& variants() const { return m_variants; }
+    const GetByIdVariant& at(size_t index) const { return m_variants[index]; }
+    const GetByIdVariant& operator[](size_t index) const { return at(index); }
+
+    bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls || m_state == Custom; }
+    bool makesCalls() const;
+    
+    bool wasSeenInJIT() const { return m_wasSeenInJIT; }
+    
+    // Attempts to reduce the set of variants to fit the given structure set. This may be approximate.
+    void filter(const StructureSet&);
+    
+    void dump(PrintStream&) const;
+    
+private:
+#if ENABLE(DFG_JIT)
+    static bool hasExitSite(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+    static GetByIdStatus computeForStubInfoWithoutExitSiteFeedback(
+        const ConcurrentJSLocker&, CodeBlock* profiledBlock, StructureStubInfo*,
+        UniquedStringImpl* uid, CallLinkStatus::ExitSiteData);
+#endif
+    static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    
+    bool appendVariant(const GetByIdVariant&);
+    
+    State m_state;
+    Vector<GetByIdVariant, 1> m_variants;
+    bool m_wasSeenInJIT;
+};
+
+} // namespace JSC
diff --git a/bytecode/GetByIdVariant.cpp b/bytecode/GetByIdVariant.cpp
new file mode 100644
index 0000000..d940b62
--- /dev/null
+++ b/bytecode/GetByIdVariant.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "GetByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+GetByIdVariant::GetByIdVariant(
+    const StructureSet& structureSet, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet,
+    std::unique_ptr<CallLinkStatus> callLinkStatus,
+    JSFunction* intrinsicFunction,
+    DOMJIT::GetterSetter* domJIT)
+    : m_structureSet(structureSet)
+    , m_conditionSet(conditionSet)
+    , m_offset(offset)
+    , m_callLinkStatus(WTFMove(callLinkStatus))
+    , m_intrinsicFunction(intrinsicFunction)
+    , m_domJIT(domJIT)
+{
+    if (!structureSet.size()) {
+        ASSERT(offset == invalidOffset);
+        ASSERT(conditionSet.isEmpty());
+    }
+    if (intrinsicFunction)
+        ASSERT(intrinsic() != NoIntrinsic);
+}
+                     
+GetByIdVariant::~GetByIdVariant() { }
+
+GetByIdVariant::GetByIdVariant(const GetByIdVariant& other)
+    : GetByIdVariant()
+{
+    *this = other;
+}
+
+GetByIdVariant& GetByIdVariant::operator=(const GetByIdVariant& other)
+{
+    m_structureSet = other.m_structureSet;
+    m_conditionSet = other.m_conditionSet;
+    m_offset = other.m_offset;
+    m_intrinsicFunction = other.m_intrinsicFunction;
+    m_domJIT = other.m_domJIT;
+    if (other.m_callLinkStatus)
+        m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+    else
+        m_callLinkStatus = nullptr;
+    return *this;
+}
+
+inline bool GetByIdVariant::canMergeIntrinsicStructures(const GetByIdVariant& other) const
+{
+    if (m_intrinsicFunction != other.m_intrinsicFunction)
+        return false;
+    switch (intrinsic()) {
+    case TypedArrayByteLengthIntrinsic: {
+        // We can merge these sets as long as the element size of the two sets is the same.
+        TypedArrayType thisType = (*m_structureSet.begin())->classInfo()->typedArrayStorageType;
+        TypedArrayType otherType = (*other.m_structureSet.begin())->classInfo()->typedArrayStorageType;
+
+        ASSERT(isTypedView(thisType) && isTypedView(otherType));
+
+        return logElementSize(thisType) == logElementSize(otherType);
+    }
+
+    default:
+        return true;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+bool GetByIdVariant::attemptToMerge(const GetByIdVariant& other)
+{
+    if (m_offset != other.m_offset)
+        return false;
+    if (m_callLinkStatus || other.m_callLinkStatus)
+        return false;
+
+    if (!canMergeIntrinsicStructures(other))
+        return false;
+
+    if (m_domJIT != other.m_domJIT)
+        return false;
+
+    if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty())
+        return false;
+    
+    ObjectPropertyConditionSet mergedConditionSet;
+    if (!m_conditionSet.isEmpty()) {
+        mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet);
+        if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition())
+            return false;
+    }
+    m_conditionSet = mergedConditionSet;
+    
+    m_structureSet.merge(other.m_structureSet);
+    
+    return true;
+}
+
+void GetByIdVariant::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+
+void GetByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!isSet()) {
+        out.print("");
+        return;
+    }
+    
+    out.print(
+        "<", inContext(structureSet(), context), ", ", inContext(m_conditionSet, context));
+    out.print(", offset = ", offset());
+    if (m_callLinkStatus)
+        out.print(", call = ", *m_callLinkStatus);
+    if (m_intrinsicFunction)
+        out.print(", intrinsic = ", *m_intrinsicFunction);
+    if (m_domJIT)
+        out.print(", domjit = ", RawPointer(m_domJIT));
+    out.print(">");
+}
+
+} // namespace JSC
+
diff --git a/bytecode/GetByIdVariant.h b/bytecode/GetByIdVariant.h
new file mode 100644
index 0000000..8ded248
--- /dev/null
+++ b/bytecode/GetByIdVariant.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallLinkStatus.h"
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+class CallLinkStatus;
+class GetByIdStatus;
+struct DumpContext;
+
+class GetByIdVariant {
+public:
+    GetByIdVariant(
+        const StructureSet& structureSet = StructureSet(), PropertyOffset offset = invalidOffset,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+        std::unique_ptr<CallLinkStatus> = nullptr,
+        JSFunction* = nullptr,
+        DOMJIT::GetterSetter* = nullptr);
+
+    ~GetByIdVariant();
+    
+    GetByIdVariant(const GetByIdVariant&);
+    GetByIdVariant& operator=(const GetByIdVariant&);
+    
+    bool isSet() const { return !!m_structureSet.size(); }
+    bool operator!() const { return !isSet(); }
+    const StructureSet& structureSet() const { return m_structureSet; }
+    StructureSet& structureSet() { return m_structureSet; }
+
+    // A non-empty condition set means that this is a prototype load.
+    const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+    
+    PropertyOffset offset() const { return m_offset; }
+    CallLinkStatus* callLinkStatus() const { return m_callLinkStatus.get(); }
+    JSFunction* intrinsicFunction() const { return m_intrinsicFunction; }
+    Intrinsic intrinsic() const { return m_intrinsicFunction ? m_intrinsicFunction->intrinsic() : NoIntrinsic; }
+    DOMJIT::GetterSetter* domJIT() const { return m_domJIT; }
+
+    bool isPropertyUnset() const { return offset() == invalidOffset; }
+
+    bool attemptToMerge(const GetByIdVariant& other);
+    
+    void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    
+private:
+    friend class GetByIdStatus;
+
+    bool canMergeIntrinsicStructures(const GetByIdVariant&) const;
+    
+    StructureSet m_structureSet;
+    ObjectPropertyConditionSet m_conditionSet;
+    PropertyOffset m_offset;
+    std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+    JSFunction* m_intrinsicFunction;
+    DOMJIT::GetterSetter* m_domJIT;
+};
+
+} // namespace JSC
diff --git a/bytecode/GlobalCodeBlock.h b/bytecode/GlobalCodeBlock.h
new file mode 100644
index 0000000..9ab2226
--- /dev/null
+++ b/bytecode/GlobalCodeBlock.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+// Program code is not marked by any function, so we make the global object
+// responsible for marking it.
+
+class GlobalCodeBlock : public CodeBlock {
+    typedef CodeBlock Base;
+
+protected:
+    GlobalCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, GlobalCodeBlock& other)
+        : CodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    GlobalCodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
+        : CodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
+    {
+    }
+};
+
+} // namespace JSC
diff --git a/bytecode/HandlerInfo.h b/bytecode/HandlerInfo.h
new file mode 100644
index 0000000..0b1bd3b
--- /dev/null
+++ b/bytecode/HandlerInfo.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeLocation.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+enum class HandlerType {
+    Illegal = 0,
+    Catch = 1,
+    Finally = 2,
+    SynthesizedFinally = 3
+};
+
+enum class RequiredHandler {
+    CatchHandler,
+    AnyHandler
+};
+
+struct HandlerInfoBase {
+    HandlerType type() const { return static_cast<HandlerType>(typeBits); }
+    void setType(HandlerType type) { typeBits = static_cast<uint32_t>(type); }
+
+    const char* typeName()
+    {
+        switch (type()) {
+        case HandlerType::Catch:
+            return "catch";
+        case HandlerType::Finally:
+            return "finally";
+        case HandlerType::SynthesizedFinally:
+            return "synthesized finally";
+        default:
+            ASSERT_NOT_REACHED();
+        }
+        return nullptr;
+    }
+
+    bool isCatchHandler() const { return type() == HandlerType::Catch; }
+
+    template<typename Handler>
+    static Handler* handlerForIndex(Vector<Handler>& exceptionHandlers, unsigned index, RequiredHandler requiredHandler)
+    {
+        for (Handler& handler : exceptionHandlers) {
+            if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
+                continue;
+
+            // Handlers are ordered innermost first, so the first handler we encounter
+            // that contains the source address is the correct handler to use.
+            // The index used is either a bytecode offset or a CallSiteIndex.
+            if (handler.start <= index && handler.end > index)
+                return &handler;
+        }
+
+        return nullptr;
+    }
+
+    uint32_t start;
+    uint32_t end;
+    uint32_t target;
+    uint32_t typeBits : 2; // HandlerType
+};
+
+struct UnlinkedHandlerInfo : public HandlerInfoBase {
+    UnlinkedHandlerInfo(uint32_t start, uint32_t end, uint32_t target, HandlerType handlerType)
+    {
+        this->start = start;
+        this->end = end;
+        this->target = target;
+        setType(handlerType);
+        ASSERT(type() == handlerType);
+    }
+};
+
+struct HandlerInfo : public HandlerInfoBase {
+    void initialize(const UnlinkedHandlerInfo& unlinkedInfo)
+    {
+        start = unlinkedInfo.start;
+        end = unlinkedInfo.end;
+        target = unlinkedInfo.target;
+        typeBits = unlinkedInfo.typeBits;
+    }
+
+#if ENABLE(JIT)
+    void initialize(const UnlinkedHandlerInfo& unlinkedInfo, CodeLocationLabel label)
+    {
+        initialize(unlinkedInfo);
+        nativeCode = label;
+    }
+
+    CodeLocationLabel nativeCode;
+#endif
+};
+
+} // namespace JSC
diff --git a/bytecode/InlineAccess.cpp b/bytecode/InlineAccess.cpp
new file mode 100644
index 0000000..667492a
--- /dev/null
+++ b/bytecode/InlineAccess.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "InlineAccess.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JSArray.h"
+#include "JSCellInlines.h"
+#include "LinkBuffer.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include "StructureStubInfo.h"
+#include "VM.h"
+
+namespace JSC {
+
+void InlineAccess::dumpCacheSizesAndCrash(VM& vm)
+{
+    GPRReg base = GPRInfo::regT0;
+    GPRReg value = GPRInfo::regT1;
+#if USE(JSVALUE32_64)
+    JSValueRegs regs(base, value);
+#else
+    JSValueRegs regs(base);
+#endif
+
+    {
+        CCallHelpers jit(&vm);
+
+        GPRReg scratchGPR = value;
+        jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), value);
+        jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), value);
+        jit.patchableBranch32(
+            CCallHelpers::NotEqual, value, CCallHelpers::TrustedImm32(IsArray | ContiguousShape));
+        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value);
+        jit.load32(CCallHelpers::Address(value, ArrayStorage::lengthOffset()), value);
+        jit.boxInt32(scratchGPR, regs);
+
+        dataLog("array length size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+        jit.loadPtr(
+            CCallHelpers::Address(base, JSObject::butterflyOffset()),
+            value);
+        GPRReg storageGPR = value;
+        jit.loadValue(
+            CCallHelpers::Address(storageGPR, 0x000ab21ca), regs);
+
+        dataLog("out of line offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+        jit.loadValue(
+            MacroAssembler::Address(base, 0x000ab21ca), regs);
+
+        dataLog("inline offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+
+        jit.storeValue(
+            regs, MacroAssembler::Address(base, 0x000ab21ca));
+
+        dataLog("replace cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    {
+        CCallHelpers jit(&vm);
+
+        jit.patchableBranch32(
+            MacroAssembler::NotEqual,
+            MacroAssembler::Address(base, JSCell::structureIDOffset()),
+            MacroAssembler::TrustedImm32(0x000ab21ca));
+
+        jit.loadPtr(MacroAssembler::Address(base, JSObject::butterflyOffset()), value);
+        jit.storeValue(
+            regs,
+            MacroAssembler::Address(base, 120342));
+
+        dataLog("replace out of line cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
+    }
+
+    CRASH();
+}
+
+
+template <typename Function>
+ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function)
+{
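+    // Patch the generated fast path into the inline region only if it fits in the
+    // space reserved for this IC when the stub was set up; otherwise report failure
+    // so the caller can fall back to a slower path.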
+    if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
+        bool needsBranchCompaction = false;
+        LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
+        ASSERT(linkBuffer.isValid());
+        function(linkBuffer);
+        FINALIZE_CODE(linkBuffer, ("InlineAccessType: '%s'", name));
+        return true;
+    }
+
+    // This is helpful when determining the size to reserve for inline ICs on various
+    // platforms. You want a size that usually succeeds, but the length of the code we
+    // generate can vary a little just because of randomness. It's helpful to flip this
+    // on when running tests or browsing the web to see how often inlining fails; you
+    // don't want an IC size that always fails.
+    const bool failIfCantInline = false;
+    if (failIfCantInline) {
+        dataLog("Failure for: ", name, "\n");
+        dataLog("real size: ", jit.m_assembler.buffer().codeSize(), " inline size:", stubInfo.patch.inlineSize, "\n");
+        CRASH();
+    }
+
+    return false;
+}
+
+bool InlineAccess::generateSelfPropertyAccess(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+{
+    CCallHelpers jit(&vm);
+    
+    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    JSValueRegs value = stubInfo.valueRegs();
+
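+    // The fast path is guarded by a single structure check: if the base cell's
+    // StructureID does not match the cached structure, jump to the slow path stub.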
+    auto branchToSlowPath = jit.patchableBranch32(
+        MacroAssembler::NotEqual,
+        MacroAssembler::Address(base, JSCell::structureIDOffset()),
+        MacroAssembler::TrustedImm32(bitwise_cast<int32_t>(structure->id())));
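+    // Inline-offset properties live directly in the cell; out-of-line properties live
+    // in the butterfly, so load the butterfly pointer first in that case.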
+    GPRReg storage;
+    if (isInlineOffset(offset))
+        storage = base;
+    else {
+        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
+        storage = value.payloadGPR();
+    }
+    
+    jit.loadValue(
+        MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value);
+
+    bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+    });
+    return linkedCodeInline;
+}
+
+ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo)
+{
+    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseGPR));
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueGPR));
+#if USE(JSVALUE32_64)
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueTagGPR));
+#endif
+    GPRReg scratch = allocator.allocateScratchGPR();
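+    // Inline access code cannot spill, so only report a scratch register if the
+    // allocator could hand one out without reusing a register the IC already needs.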
+    if (allocator.didReuseRegisters())
+        return InvalidGPRReg;
+    return scratch;
+}
+
+ALWAYS_INLINE static bool hasFreeRegister(StructureStubInfo& stubInfo)
+{
+    return getScratchRegister(stubInfo) != InvalidGPRReg;
+}
+
+bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
+{
+    if (isInlineOffset(offset))
+        return true;
+
+    return hasFreeRegister(stubInfo);
+}
+
+bool InlineAccess::generateSelfPropertyReplace(VM& vm, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
+{
+    ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));
+
+    CCallHelpers jit(&vm);
+
+    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    JSValueRegs value = stubInfo.valueRegs();
+
+    auto branchToSlowPath = jit.patchableBranch32(
+        MacroAssembler::NotEqual,
+        MacroAssembler::Address(base, JSCell::structureIDOffset()),
+        MacroAssembler::TrustedImm32(bitwise_cast<int32_t>(structure->id())));
+
+    GPRReg storage;
+    if (isInlineOffset(offset))
+        storage = base;
+    else {
+        storage = getScratchRegister(stubInfo);
+        ASSERT(storage != InvalidGPRReg);
+        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), storage);
+    }
+
+    jit.storeValue(
+        value, MacroAssembler::Address(storage, offsetRelativeToBase(offset)));
+
+    bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+    });
+    return linkedCodeInline;
+}
+
+bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
+{
+    ASSERT(array->indexingType() & IsArray);
+
+    if (!hasFreeRegister(stubInfo))
+        return false;
+
+    return array->indexingType() == ArrayWithInt32
+        || array->indexingType() == ArrayWithDouble
+        || array->indexingType() == ArrayWithContiguous;
+}
+
+bool InlineAccess::generateArrayLength(VM& vm, StructureStubInfo& stubInfo, JSArray* array)
+{
+    ASSERT(isCacheableArrayLength(stubInfo, array));
+
+    CCallHelpers jit(&vm);
+
+    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+    JSValueRegs value = stubInfo.valueRegs();
+    GPRReg scratch = getScratchRegister(stubInfo);
+
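+    // Guard on the indexing type: the fast path only handles arrays whose indexing
+    // shape matches the one we profiled, and loads the length from the butterfly.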
+    jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch);
+    jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), scratch);
+    auto branchToSlowPath = jit.patchableBranch32(
+        CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType()));
+    jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
+    jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR());
+    jit.boxInt32(value.payloadGPR(), value);
+
+    bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
+        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
+    });
+    return linkedCodeInline;
+}
+
+void InlineAccess::rewireStubAsJump(VM& vm, StructureStubInfo& stubInfo, CodeLocationLabel target)
+{
+    CCallHelpers jit(&vm);
+
+    auto jump = jit.jump();
+
+    // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
+    bool needsBranchCompaction = false;
+    LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
+    RELEASE_ASSERT(linkBuffer.isValid());
+    linkBuffer.link(jump, target);
+
+    FINALIZE_CODE(linkBuffer, ("InlineAccess: linking constant jump"));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/bytecode/InlineAccess.h b/bytecode/InlineAccess.h
new file mode 100644
index 0000000..3910c5b
--- /dev/null
+++ b/bytecode/InlineAccess.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CodeLocation.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class JSArray;
+class Structure;
+class StructureStubInfo;
+class VM;
+
+class InlineAccess {
+public:
+
+    // This is the maximum between inline and out of line self access cases.
+    static constexpr size_t sizeForPropertyAccess()
+    {
+#if CPU(X86_64)
+        return 23;
+#elif CPU(X86)
+        return 27;
+#elif CPU(ARM64)
+        return 40;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+        return 48;
+#else
+        return 52;
+#endif
+#else
+#error "unsupported platform"
+#endif
+    }
+
+    // This is the maximum between inline and out of line property replace cases.
+    static constexpr size_t sizeForPropertyReplace()
+    {
+#if CPU(X86_64)
+        return 23;
+#elif CPU(X86)
+        return 27;
+#elif CPU(ARM64)
+        return 40;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+        return 48;
+#else
+        return 48;
+#endif
+#else
+#error "unsupported platform"
+#endif
+    }
+
+    // FIXME: Make this constexpr when GCC is able to compile std::max() inside a constexpr function.
+    // https://bugs.webkit.org/show_bug.cgi?id=159436
+    //
+    // This is the maximum between the size for array length access, and the size for regular self access.
+    ALWAYS_INLINE static size_t sizeForLengthAccess()
+    {
+#if CPU(X86_64)
+        size_t size = 26;
+#elif CPU(X86)
+        size_t size = 27;
+#elif CPU(ARM64)
+        size_t size = 32;
+#elif CPU(ARM)
+#if CPU(ARM_THUMB2)
+        size_t size = 30;
+#else
+        size_t size = 32;
+#endif
+#else
+#error "unsupported platform"
+#endif
+        return std::max(size, sizeForPropertyAccess());
+    }
+
+    static bool generateSelfPropertyAccess(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+    static bool canGenerateSelfPropertyReplace(StructureStubInfo&, PropertyOffset);
+    static bool generateSelfPropertyReplace(VM&, StructureStubInfo&, Structure*, PropertyOffset);
+    static bool isCacheableArrayLength(StructureStubInfo&, JSArray*);
+    static bool generateArrayLength(VM&, StructureStubInfo&, JSArray*);
+    static void rewireStubAsJump(VM&, StructureStubInfo&, CodeLocationLabel);
+
+    // This is helpful when determining the size of an IC on
+    // various platforms. When adding a new type of IC, implement
+    // its placeholder code here, and log the size. That way we
+    // can intelligently choose sizes on various platforms.
+    NO_RETURN_DUE_TO_CRASH static void dumpCacheSizesAndCrash(VM&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/bytecode/InlineCallFrame.cpp b/bytecode/InlineCallFrame.cpp
new file mode 100644
index 0000000..97ce84d
--- /dev/null
+++ b/bytecode/InlineCallFrame.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "InlineCallFrame.h"
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+JSFunction* InlineCallFrame::calleeConstant() const
+{
+    if (calleeRecovery.isConstant())
+        return jsCast<JSFunction*>(calleeRecovery.constant());
+    return nullptr;
+}
+
+JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const
+{
+    return jsCast<JSFunction*>(calleeRecovery.recover(exec));
+}
+
+CodeBlockHash InlineCallFrame::hash() const
+{
+    return baselineCodeBlock->hash();
+}
+
+CString InlineCallFrame::hashAsStringIfPossible() const
+{
+    return baselineCodeBlock->hashAsStringIfPossible();
+}
+
+CString InlineCallFrame::inferredName() const
+{
+    return jsCast<FunctionExecutable*>(baselineCodeBlock->ownerExecutable())->inferredName().utf8();
+}
+
+void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const
+{
+    out.print(inferredName(), "#", hashAsStringIfPossible());
+}
+
+void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get()));
+    if (isStrictMode())
+        out.print(" (StrictMode)");
+    out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast(kind));
+    if (isClosureCall)
+        out.print(", closure call");
+    else
+        out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
+    out.print(", numArgs+this = ", arguments.size());
+    out.print(", stackOffset = ", stackOffset);
+    out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>");
+}
+
+void InlineCallFrame::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::InlineCallFrame::Kind kind)
+{
+    switch (kind) {
+    case JSC::InlineCallFrame::Call:
+        out.print("Call");
+        return;
+    case JSC::InlineCallFrame::Construct:
+        out.print("Construct");
+        return;
+    case JSC::InlineCallFrame::TailCall:
+        out.print("TailCall");
+        return;
+    case JSC::InlineCallFrame::CallVarargs:
+        out.print("CallVarargs");
+        return;
+    case JSC::InlineCallFrame::ConstructVarargs:
+        out.print("ConstructVarargs");
+        return;
+    case JSC::InlineCallFrame::TailCallVarargs:
+        out.print("TailCallVarargs");
+        return;
+    case JSC::InlineCallFrame::GetterCall:
+        out.print("GetterCall");
+        return;
+    case JSC::InlineCallFrame::SetterCall:
+        out.print("SetterCall");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/bytecode/InlineCallFrame.h b/bytecode/InlineCallFrame.h
new file mode 100644
index 0000000..cd2a5fe
--- /dev/null
+++ b/bytecode/InlineCallFrame.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeBlockHash.h"
+#include "CodeOrigin.h"
+#include "ValueRecovery.h"
+#include "WriteBarrier.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+
+struct InlineCallFrame;
+class ExecState;
+class JSFunction;
+
+struct InlineCallFrame {
+    enum Kind {
+        Call,
+        Construct,
+        TailCall,
+        CallVarargs,
+        ConstructVarargs,
+        TailCallVarargs,
+        
+        // For these, the stackOffset incorporates the argument count plus the true return PC
+        // slot.
+        GetterCall,
+        SetterCall
+    };
+
+    static CallMode callModeFor(Kind kind)
+    {
+        switch (kind) {
+        case Call:
+        case CallVarargs:
+        case GetterCall:
+        case SetterCall:
+            return CallMode::Regular;
+        case TailCall:
+        case TailCallVarargs:
+            return CallMode::Tail;
+        case Construct:
+        case ConstructVarargs:
+            return CallMode::Construct;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    static Kind kindFor(CallMode callMode)
+    {
+        switch (callMode) {
+        case CallMode::Regular:
+            return Call;
+        case CallMode::Construct:
+            return Construct;
+        case CallMode::Tail:
+            return TailCall;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static Kind varargsKindFor(CallMode callMode)
+    {
+        switch (callMode) {
+        case CallMode::Regular:
+            return CallVarargs;
+        case CallMode::Construct:
+            return ConstructVarargs;
+        case CallMode::Tail:
+            return TailCallVarargs;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static CodeSpecializationKind specializationKindFor(Kind kind)
+    {
+        switch (kind) {
+        case Call:
+        case CallVarargs:
+        case TailCall:
+        case TailCallVarargs:
+        case GetterCall:
+        case SetterCall:
+            return CodeForCall;
+        case Construct:
+        case ConstructVarargs:
+            return CodeForConstruct;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    static bool isVarargs(Kind kind)
+    {
+        switch (kind) {
+        case CallVarargs:
+        case TailCallVarargs:
+        case ConstructVarargs:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    static bool isTail(Kind kind)
+    {
+        switch (kind) {
+        case TailCall:
+        case TailCallVarargs:
+            return true;
+        default:
+            return false;
+        }
+    }
+    bool isTail() const
+    {
+        return isTail(static_cast<Kind>(kind));
+    }
+
+    static CodeOrigin* computeCallerSkippingTailCalls(InlineCallFrame* inlineCallFrame, Kind* callerCallKind = nullptr)
+    {
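+        // Tail calls reuse the caller's frame, so walk up the inline stack until we
+        // reach a frame that was not entered via a tail call (or run out of frames).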
+        CodeOrigin* codeOrigin;
+        bool tailCallee;
+        int callKind;
+        do {
+            tailCallee = inlineCallFrame->isTail();
+            callKind = inlineCallFrame->kind;
+            codeOrigin = &inlineCallFrame->directCaller;
+            inlineCallFrame = codeOrigin->inlineCallFrame;
+        } while (inlineCallFrame && tailCallee);
+
+        if (tailCallee)
+            return nullptr;
+
+        if (callerCallKind)
+            *callerCallKind = static_cast<Kind>(callKind);
+
+        return codeOrigin;
+    }
+
+    CodeOrigin* getCallerSkippingTailCalls(Kind* callerCallKind = nullptr)
+    {
+        return computeCallerSkippingTailCalls(this, callerCallKind);
+    }
+
+    InlineCallFrame* getCallerInlineFrameSkippingTailCalls()
+    {
+        CodeOrigin* caller = getCallerSkippingTailCalls();
+        return caller ? caller->inlineCallFrame : nullptr;
+    }
+    
+    Vector<ValueRecovery> arguments; // Includes 'this'.
+    WriteBarrier<CodeBlock> baselineCodeBlock;
+    ValueRecovery calleeRecovery;
+    CodeOrigin directCaller;
+
+    signed stackOffset : 28;
+    unsigned kind : 3; // real type is Kind
+    bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually.
+    VirtualRegister argumentCountRegister; // Only set when we inline a varargs call.
+    
+    // There is really no good notion of a "default" set of values for
+    // InlineCallFrame's fields. This constructor is here just to reduce confusion if
+    // we forgot to initialize explicitly.
+    InlineCallFrame()
+        : stackOffset(0)
+        , kind(Call)
+        , isClosureCall(false)
+    {
+    }
+    
+    bool isVarargs() const
+    {
+        return isVarargs(static_cast<Kind>(kind));
+    }
+
+    CodeSpecializationKind specializationKind() const { return specializationKindFor(static_cast<Kind>(kind)); }
+
+    JSFunction* calleeConstant() const;
+    
+    // Get the callee given a machine call frame to which this InlineCallFrame belongs.
+    JSFunction* calleeForCallFrame(ExecState*) const;
+    
+    CString inferredName() const;
+    CodeBlockHash hash() const;
+    CString hashAsStringIfPossible() const;
+    
+    void setStackOffset(signed offset)
+    {
+        stackOffset = offset;
+        RELEASE_ASSERT(static_cast(stackOffset) == offset);
+    }
+
+    ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); }
+    ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); }
+
+    bool isStrictMode() const { return baselineCodeBlock->isStrictMode(); }
+
+    void dumpBriefFunctionInformation(PrintStream&) const;
+    void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+
+    MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
+
+};
+
+inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
+{
+    RELEASE_ASSERT(inlineCallFrame);
+    return inlineCallFrame->baselineCodeBlock.get();
+}
+
+inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+{
+    if (codeOrigin.inlineCallFrame)
+        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
+    return baselineCodeBlock;
+}
+
+template <typename Function>
+inline void CodeOrigin::walkUpInlineStack(const Function& function)
+{
+    CodeOrigin codeOrigin = *this;
+    while (true) {
+        function(codeOrigin);
+        if (!codeOrigin.inlineCallFrame)
+            break;
+        codeOrigin = codeOrigin.inlineCallFrame->directCaller;
+    }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::InlineCallFrame::Kind);
+
+} // namespace WTF
diff --git a/bytecode/InlineCallFrameSet.cpp b/bytecode/InlineCallFrameSet.cpp
new file mode 100644
index 0000000..402cfd0
--- /dev/null
+++ b/bytecode/InlineCallFrameSet.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "InlineCallFrameSet.h"
+
+#include "InlineCallFrame.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+InlineCallFrameSet::InlineCallFrameSet() { }
+InlineCallFrameSet::~InlineCallFrameSet() { }
+
+InlineCallFrame* InlineCallFrameSet::add()
+{
+    return m_frames.add();
+}
+
+} // namespace JSC
+
diff --git a/bytecode/InlineCallFrameSet.h b/bytecode/InlineCallFrameSet.h
new file mode 100644
index 0000000..6c61841
--- /dev/null
+++ b/bytecode/InlineCallFrameSet.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeOrigin.h"
+#include <wtf/Bag.h>
+#include <wtf/RefCounted.h>
+
+namespace JSC {
+
+class InlineCallFrameSet : public RefCounted<InlineCallFrameSet> {
+public:
+    InlineCallFrameSet();
+    ~InlineCallFrameSet();
+    
+    bool isEmpty() const { return m_frames.isEmpty(); }
+    
+    InlineCallFrame* add();
+    
+    typedef Bag<InlineCallFrame>::iterator iterator;
+    iterator begin() { return m_frames.begin(); }
+    iterator end() { return m_frames.end(); }
+
+private:
+    Bag<InlineCallFrame> m_frames;
+};
+
+} // namespace JSC
diff --git a/bytecode/Instruction.h b/bytecode/Instruction.h
new file mode 100644
index 0000000..a86739f
--- /dev/null
+++ b/bytecode/Instruction.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BasicBlockLocation.h"
+#include "MacroAssembler.h"
+#include "PutByIdFlags.h"
+#include "SymbolTable.h"
+#include "TypeLocation.h"
+#include "PropertySlot.h"
+#include "SpecialPointer.h"
+#include "Structure.h"
+#include "StructureChain.h"
+#include "ToThisStatus.h"
+#include "VirtualRegister.h"
+#include <wtf/VectorTraits.h>
+
+namespace JSC {
+
+class ArrayAllocationProfile;
+class ArrayProfile;
+class ObjectAllocationProfile;
+class WatchpointSet;
+struct LLIntCallLinkInfo;
+struct ValueProfile;
+
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+typedef void* Opcode;
+#else
+typedef OpcodeID Opcode;
+#endif
+
+struct Instruction {
+    Instruction()
+    {
+        u.jsCell.clear();
+    }
+        
+    Instruction(Opcode opcode)
+    {
+#if !ENABLE(COMPUTED_GOTO_OPCODES)
+        // We have to initialize one of the pointer members to ensure that
+        // the entire struct is initialized, when opcode is not a pointer.
+        u.jsCell.clear();
+#endif
+        u.opcode = opcode;
+    }
+
+    Instruction(int operand)
+    {
+        // We have to initialize one of the pointer members to ensure that
+        // the entire struct is initialized in 64-bit.
+        u.jsCell.clear();
+        u.operand = operand;
+    }
+    Instruction(unsigned unsignedValue)
+    {
+        // We have to initialize one of the pointer members to ensure that
+        // the entire struct is initialized in 64-bit.
+        u.jsCell.clear();
+        u.unsignedValue = unsignedValue;
+    }
+
+    Instruction(PutByIdFlags flags)
+    {
+        u.putByIdFlags = flags;
+    }
+
+    Instruction(VM& vm, JSCell* owner, Structure* structure)
+    {
+        u.structure.clear();
+        u.structure.set(vm, owner, structure);
+    }
+    Instruction(VM& vm, JSCell* owner, StructureChain* structureChain)
+    {
+        u.structureChain.clear();
+        u.structureChain.set(vm, owner, structureChain);
+    }
+    Instruction(VM& vm, JSCell* owner, JSCell* jsCell)
+    {
+        u.jsCell.clear();
+        u.jsCell.set(vm, owner, jsCell);
+    }
+
+    Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; }
+        
+    Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; }
+    Instruction(ValueProfile* profile) { u.profile = profile; }
+    Instruction(ArrayProfile* profile) { u.arrayProfile = profile; }
+    Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; }
+    Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; }
+    Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; }
+    Instruction(Special::Pointer pointer) { u.specialPointer = pointer; }
+    Instruction(UniquedStringImpl* uid) { u.uid = uid; }
+    Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; }
+
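+    // Each bytecode slot is one machine word; which union member is meaningful
+    // depends on the opcode and on the operand's position within the instruction.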
+    union {
+        Opcode opcode;
+        int operand;
+        unsigned unsignedValue;
+        WriteBarrierBase<Structure> structure;
+        StructureID structureID;
+        WriteBarrierBase<SymbolTable> symbolTable;
+        WriteBarrierBase<StructureChain> structureChain;
+        WriteBarrierBase<JSCell> jsCell;
+        WriteBarrier<Unknown>* variablePointer;
+        Special::Pointer specialPointer;
+        PropertySlot::GetValueFunc getterFunc;
+        LLIntCallLinkInfo* callLinkInfo;
+        UniquedStringImpl* uid;
+        ValueProfile* profile;
+        ArrayProfile* arrayProfile;
+        ArrayAllocationProfile* arrayAllocationProfile;
+        ObjectAllocationProfile* objectAllocationProfile;
+        WatchpointSet* watchpointSet;
+        void* pointer;
+        bool* predicatePointer;
+        ToThisStatus toThisStatus;
+        TypeLocation* location;
+        BasicBlockLocation* basicBlockLocation;
+        PutByIdFlags putByIdFlags;
+    } u;
+        
+private:
+    Instruction(StructureChain*);
+    Instruction(Structure*);
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<false, JSC::Instruction> { };
+
+} // namespace WTF
diff --git a/bytecode/InternalFunctionAllocationProfile.h b/bytecode/InternalFunctionAllocationProfile.h
new file mode 100644
index 0000000..a486523
--- /dev/null
+++ b/bytecode/InternalFunctionAllocationProfile.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSGlobalObject.h"
+#include "ObjectPrototype.h"
+#include "SlotVisitor.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class InternalFunctionAllocationProfile {
+public:
+    Structure* structure() { return m_structure.get(); }
+    Structure* createAllocationStructureFromBase(VM&, JSGlobalObject*, JSCell* owner, JSObject* prototype, Structure* base);
+
+    void clear() { m_structure.clear(); }
+    void visitAggregate(SlotVisitor& visitor) { visitor.append(m_structure); }
+
+private:
+    WriteBarrier<Structure> m_structure;
+};
+
+inline Structure* InternalFunctionAllocationProfile::createAllocationStructureFromBase(VM& vm, JSGlobalObject* globalObject, JSCell* owner, JSObject* prototype, Structure* baseStructure)
+{
+    ASSERT(!m_structure || m_structure.get()->classInfo() != baseStructure->classInfo());
+
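+    // Reuse the base structure when the prototype has not been overridden; otherwise
+    // fetch (or create) a structure for this prototype via the VM's prototype map.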
+    Structure* structure;
+    if (prototype == baseStructure->storedPrototype())
+        structure = baseStructure;
+    else
+        structure = vm.prototypeMap.emptyStructureForPrototypeFromBaseStructure(globalObject, prototype, baseStructure);
+
+    // Ensure that if another thread sees the structure, it will see it properly created.
+    WTF::storeStoreFence();
+
+    m_structure.set(vm, owner, structure);
+    return m_structure.get();
+}
+
+} // namespace JSC
diff --git a/bytecode/JumpTable.cpp b/bytecode/JumpTable.cpp
new file mode 100644
index 0000000..e22ad03
--- /dev/null
+++ b/bytecode/JumpTable.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JumpTable.h"
+
+#include 
+
+namespace JSC {
+
+int32_t SimpleJumpTable::offsetForValue(int32_t value, int32_t defaultOffset)
+{
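+    // A zero branch offset means there is no case for this value, so fall back to
+    // the default offset.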
+    if (value >= min && static_cast(value - min) < branchOffsets.size()) {
+        int32_t offset = branchOffsets[value - min];
+        if (offset)
+            return offset;
+    }
+    return defaultOffset;        
+}
+
+} // namespace JSC
diff --git a/bytecode/JumpTable.h b/bytecode/JumpTable.h
new file mode 100644
index 0000000..3335425
--- /dev/null
+++ b/bytecode/JumpTable.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "MacroAssembler.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+#include <wtf/text/StringImpl.h>
+
+namespace JSC {
+
+    struct OffsetLocation {
+        int32_t branchOffset;
+#if ENABLE(JIT)
+        CodeLocationLabel ctiOffset;
+#endif
+    };
+
+    struct StringJumpTable {
+        typedef HashMap<RefPtr<StringImpl>, OffsetLocation> StringOffsetTable;
+        StringOffsetTable offsetTable;
+#if ENABLE(JIT)
+        CodeLocationLabel ctiDefault; // FIXME: it should not be necessary to store this.
+#endif
+
+        inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset)
+        {
+            StringOffsetTable::const_iterator end = offsetTable.end();
+            StringOffsetTable::const_iterator loc = offsetTable.find(value);
+            if (loc == end)
+                return defaultOffset;
+            return loc->value.branchOffset;
+        }
+
+#if ENABLE(JIT)
+        inline CodeLocationLabel ctiForValue(StringImpl* value)
+        {
+            StringOffsetTable::const_iterator end = offsetTable.end();
+            StringOffsetTable::const_iterator loc = offsetTable.find(value);
+            if (loc == end)
+                return ctiDefault;
+            return loc->value.ctiOffset;
+        }
+#endif
+        
+        void clear()
+        {
+            offsetTable.clear();
+        }
+    };
+
+    struct SimpleJumpTable {
+        // FIXME: The two Vectors can be combined into one Vector<OffsetLocation>.
+        Vector<int32_t> branchOffsets;
+        int32_t min;
+#if ENABLE(JIT)
+        Vector<CodeLocationLabel> ctiOffsets;
+        CodeLocationLabel ctiDefault;
+#endif
+
+        int32_t offsetForValue(int32_t value, int32_t defaultOffset);
+        void add(int32_t key, int32_t offset)
+        {
+            if (!branchOffsets[key])
+                branchOffsets[key] = offset;
+        }
+
+#if ENABLE(JIT)
+        void ensureCTITable()
+        {
+            ASSERT(ctiOffsets.isEmpty() || ctiOffsets.size() == branchOffsets.size());
+            ctiOffsets.grow(branchOffsets.size());
+        }
+        
+        inline CodeLocationLabel ctiForValue(int32_t value)
+        {
+            if (value >= min && static_cast(value - min) < ctiOffsets.size())
+                return ctiOffsets[value - min];
+            return ctiDefault;
+        }
+#endif
+        
+        void clear()
+        {
+            branchOffsets.clear();
+#if ENABLE(JIT)
+            ctiOffsets.clear();
+#endif
+        }
+    };
+
+} // namespace JSC
diff --git a/bytecode/LLIntCallLinkInfo.h b/bytecode/LLIntCallLinkInfo.h
new file mode 100644
index 0000000..c2cf4d1
--- /dev/null
+++ b/bytecode/LLIntCallLinkInfo.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSFunction.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/SentinelLinkedList.h>
+
+namespace JSC {
+
+struct Instruction;
+
+struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> {
+    LLIntCallLinkInfo()
+    {
+    }
+    
+    ~LLIntCallLinkInfo()
+    {
+        if (isOnList())
+            remove();
+    }
+    
+    bool isLinked() { return !!callee; }
+    
+    void unlink()
+    {
+        callee.clear();
+        machineCodeTarget = MacroAssemblerCodePtr();
+        if (isOnList())
+            remove();
+    }
+    
+    WriteBarrier<JSFunction> callee;
+    WriteBarrier<JSFunction> lastSeenCallee;
+    MacroAssemblerCodePtr machineCodeTarget;
+};
+
+} // namespace JSC
diff --git a/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp b/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
new file mode 100644
index 0000000..9a5ac01
--- /dev/null
+++ b/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
+
+#include "CodeBlock.h"
+#include "Instruction.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, Instruction* getByIdInstruction)
+    : m_key(key)
+    , m_getByIdInstruction(getByIdInstruction)
+{
+    RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint());
+    RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint());
+}
+
+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::install()
+{
+    RELEASE_ASSERT(m_key.isWatchable());
+
+    m_key.object()->structure()->addTransitionWatchpoint(this);
+}
+
+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::fireInternal(const FireDetail& detail)
+{
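+    // If the condition can be made watchable again, simply rearm the watchpoint;
+    // otherwise clear the LLInt get_by_id cache that relied on it.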
+    if (m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+        install();
+        return;
+    }
+
+    StringPrintStream out;
+    out.print("ObjectToStringValue Adaptation of ", m_key, " failed: ", detail);
+
+    StringFireDetail stringDetail(out.toCString().data());
+
+    CodeBlock::clearLLIntGetByIdCache(m_getByIdInstruction);
+}
+
+} // namespace JSC
diff --git a/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h b/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
new file mode 100644
index 0000000..8a73c6c
--- /dev/null
+++ b/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Instruction.h"
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint {
+public:
+    LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*);
+
+    void install();
+
+protected:
+    void fireInternal(const FireDetail&) override;
+
+private:
+    ObjectPropertyCondition m_key;
+    Instruction* m_getByIdInstruction;
+};
+
+} // namespace JSC
diff --git a/bytecode/LazyOperandValueProfile.cpp b/bytecode/LazyOperandValueProfile.cpp
new file mode 100644
index 0000000..0929d6f
--- /dev/null
+++ b/bytecode/LazyOperandValueProfile.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "LazyOperandValueProfile.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { }
+CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder() { }
+
+void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJSLocker& locker)
+{
+    if (!m_data)
+        return;
+    
+    for (unsigned i = 0; i < m_data->size(); ++i)
+        m_data->at(i).computeUpdatedPrediction(locker);
+}
+
+LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add(
+    const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key)
+{
+    if (!m_data)
+        m_data = std::make_unique<LazyOperandValueProfile::List>();
+    else {
+        for (unsigned i = 0; i < m_data->size(); ++i) {
+            if (m_data->at(i).key() == key)
+                return &m_data->at(i);
+        }
+    }
+    
+    m_data->append(LazyOperandValueProfile(key));
+    return &m_data->last();
+}
+
+LazyOperandValueProfileParser::LazyOperandValueProfileParser() { }
+LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { }
+
+void LazyOperandValueProfileParser::initialize(
+    const ConcurrentJSLocker&, CompressedLazyOperandValueProfileHolder& holder)
+{
+    ASSERT(m_map.isEmpty());
+    
+    if (!holder.m_data)
+        return;
+    
+    LazyOperandValueProfile::List& data = *holder.m_data;
+    for (unsigned i = 0; i < data.size(); ++i)
+        m_map.add(data[i].key(), &data[i]);
+}
+
+LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent(
+    const LazyOperandValueProfileKey& key) const
+{
+    HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*>::const_iterator iter =
+        m_map.find(key);
+    
+    if (iter == m_map.end())
+        return 0;
+    
+    return iter->value;
+}
+
+SpeculatedType LazyOperandValueProfileParser::prediction(
+    const ConcurrentJSLocker& locker, const LazyOperandValueProfileKey& key) const
+{
+    LazyOperandValueProfile* profile = getIfPresent(key);
+    if (!profile)
+        return SpecNone;
+    
+    return profile->computeUpdatedPrediction(locker);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/LazyOperandValueProfile.h b/bytecode/LazyOperandValueProfile.h
new file mode 100644
index 0000000..9c3b068
--- /dev/null
+++ b/bytecode/LazyOperandValueProfile.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ConcurrentJSLock.h"
+#include "ValueProfile.h"
+#include "VirtualRegister.h"
+#include <wtf/HashMap.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+class ScriptExecutable;
+
+class LazyOperandValueProfileKey {
+public:
+    LazyOperandValueProfileKey()
+        : m_bytecodeOffset(0) // 0 = empty value
+        , m_operand(VirtualRegister()) // not a valid operand index in our current scheme
+    {
+    }
+    
+    LazyOperandValueProfileKey(WTF::HashTableDeletedValueType)
+        : m_bytecodeOffset(1) // 1 = deleted value
+        , m_operand(VirtualRegister()) // not a valid operand index in our current scheme
+    {
+    }
+    
+    LazyOperandValueProfileKey(unsigned bytecodeOffset, VirtualRegister operand)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_operand(operand)
+    {
+        ASSERT(m_operand.isValid());
+    }
+    
+    bool operator!() const
+    {
+        return !m_operand.isValid();
+    }
+    
+    bool operator==(const LazyOperandValueProfileKey& other) const
+    {
+        return m_bytecodeOffset == other.m_bytecodeOffset
+            && m_operand == other.m_operand;
+    }
+    
+    unsigned hash() const
+    {
+        return WTF::intHash(m_bytecodeOffset) + m_operand.offset();
+    }
+    
+    unsigned bytecodeOffset() const
+    {
+        ASSERT(!!*this);
+        return m_bytecodeOffset;
+    }
+
+    VirtualRegister operand() const
+    {
+        ASSERT(!!*this);
+        return m_operand;
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return !m_operand.isValid() && m_bytecodeOffset;
+    }
+private: 
+    unsigned m_bytecodeOffset;
+    VirtualRegister m_operand;
+};
+
+struct LazyOperandValueProfileKeyHash {
+    static unsigned hash(const LazyOperandValueProfileKey& key) { return key.hash(); }
+    static bool equal(
+        const LazyOperandValueProfileKey& a,
+        const LazyOperandValueProfileKey& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::LazyOperandValueProfileKey> {
+    typedef JSC::LazyOperandValueProfileKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::LazyOperandValueProfileKey> : public GenericHashTraits<JSC::LazyOperandValueProfileKey> {
+    static void constructDeletedValue(JSC::LazyOperandValueProfileKey& slot) { new (NotNull, &slot) JSC::LazyOperandValueProfileKey(HashTableDeletedValue); }
+    static bool isDeletedValue(const JSC::LazyOperandValueProfileKey& value) { return value.isHashTableDeletedValue(); }
+};
+
+} // namespace WTF
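+
+// With the DefaultHash and HashTraits specializations above, LazyOperandValueProfileKey can be
+// used directly as a key in WTF hash containers. A minimal sketch of the intended usage
+// (names illustrative):
+//
+//     HashMap<JSC::LazyOperandValueProfileKey, JSC::LazyOperandValueProfile*> map;
+//     map.add(JSC::LazyOperandValueProfileKey(bytecodeOffset, operand), profile);
+//
+// This is exactly the shape of the map kept by LazyOperandValueProfileParser below.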
+
+namespace JSC {
+
+struct LazyOperandValueProfile : public MinimalValueProfile {
+    LazyOperandValueProfile()
+        : MinimalValueProfile()
+        , m_operand(VirtualRegister())
+    {
+    }
+    
+    explicit LazyOperandValueProfile(const LazyOperandValueProfileKey& key)
+        : MinimalValueProfile(key.bytecodeOffset())
+        , m_operand(key.operand())
+    {
+    }
+    
+    LazyOperandValueProfileKey key() const
+    {
+        return LazyOperandValueProfileKey(m_bytecodeOffset, m_operand);
+    }
+    
+    VirtualRegister m_operand;
+    
+    typedef SegmentedVector<LazyOperandValueProfile, 8> List;
+};
+
+class LazyOperandValueProfileParser;
+
+class CompressedLazyOperandValueProfileHolder {
+    WTF_MAKE_NONCOPYABLE(CompressedLazyOperandValueProfileHolder);
+public:
+    CompressedLazyOperandValueProfileHolder();
+    ~CompressedLazyOperandValueProfileHolder();
+    
+    void computeUpdatedPredictions(const ConcurrentJSLocker&);
+    
+    LazyOperandValueProfile* add(
+        const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key);
+    
+private:
+    friend class LazyOperandValueProfileParser;
+    std::unique_ptr<LazyOperandValueProfile::List> m_data;
+};
+
+class LazyOperandValueProfileParser {
+    WTF_MAKE_NONCOPYABLE(LazyOperandValueProfileParser);
+public:
+    explicit LazyOperandValueProfileParser();
+    ~LazyOperandValueProfileParser();
+    
+    void initialize(
+        const ConcurrentJSLocker&, CompressedLazyOperandValueProfileHolder& holder);
+    
+    LazyOperandValueProfile* getIfPresent(
+        const LazyOperandValueProfileKey& key) const;
+    
+    SpeculatedType prediction(
+        const ConcurrentJSLocker&, const LazyOperandValueProfileKey& key) const;
+private:
+    HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*> m_map;
+};
+
+} // namespace JSC
diff --git a/bytecode/MethodOfGettingAValueProfile.cpp b/bytecode/MethodOfGettingAValueProfile.cpp
new file mode 100644
index 0000000..f479e5f
--- /dev/null
+++ b/bytecode/MethodOfGettingAValueProfile.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2012, 2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "MethodOfGettingAValueProfile.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "ArithProfile.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand(
+    CodeBlock* codeBlock, const LazyOperandValueProfileKey& key)
+{
+    MethodOfGettingAValueProfile result;
+    result.m_kind = LazyOperand;
+    result.u.lazyOperand.codeBlock = codeBlock;
+    result.u.lazyOperand.bytecodeOffset = key.bytecodeOffset();
+    result.u.lazyOperand.operand = key.operand().offset();
+    return result;
+}
+
+void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueRegs regs) const
+{
+    switch (m_kind) {
+    case None:
+        return;
+        
+    case Ready:
+        jit.storeValue(regs, u.profile->specFailBucket(0));
+        return;
+        
+    case LazyOperand: {
+        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
+        
+        ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
+        LazyOperandValueProfile* profile =
+            u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key);
+        jit.storeValue(regs, profile->specFailBucket(0));
+        return;
+    }
+        
+    case ArithProfileReady: {
+        u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
+        return;
+    } }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/bytecode/MethodOfGettingAValueProfile.h b/bytecode/MethodOfGettingAValueProfile.h
new file mode 100644
index 0000000..98e39db
--- /dev/null
+++ b/bytecode/MethodOfGettingAValueProfile.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+// This is guarded by ENABLE_DFG_JIT only because it uses some value profiles
+// that are currently only used if the DFG is enabled (i.e. they are not
+// available in the profile-only configuration). Hopefully someday all of
+// these #if's will disappear...
+#if ENABLE(DFG_JIT)
+
+#include "GPRInfo.h"
+#include "JSCJSValue.h"
+
+namespace JSC {
+
+class CCallHelpers;
+class CodeBlock;
+class LazyOperandValueProfileKey;
+struct ArithProfile;
+struct ValueProfile;
+
+class MethodOfGettingAValueProfile {
+public:
+    MethodOfGettingAValueProfile()
+        : m_kind(None)
+    {
+    }
+    
+    MethodOfGettingAValueProfile(ValueProfile* profile)
+    {
+        if (profile) {
+            m_kind = Ready;
+            u.profile = profile;
+        } else
+            m_kind = None;
+    }
+    
+    MethodOfGettingAValueProfile(ArithProfile* profile)
+    {
+        if (profile) {
+            m_kind = ArithProfileReady;
+            u.arithProfile = profile;
+        } else
+            m_kind = None;
+    }
+    
+    static MethodOfGettingAValueProfile fromLazyOperand(
+        CodeBlock*, const LazyOperandValueProfileKey&);
+    
+    explicit operator bool() const { return m_kind != None; }
+    
+    void emitReportValue(CCallHelpers&, JSValueRegs) const;
+    
+private:
+    enum Kind {
+        None,
+        Ready,
+        ArithProfileReady,
+        LazyOperand
+    };
+    
+    Kind m_kind;
+    union {
+        ValueProfile* profile;
+        ArithProfile* arithProfile;
+        struct {
+            CodeBlock* codeBlock;
+            unsigned bytecodeOffset;
+            int operand;
+        } lazyOperand;
+    } u;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(DFG_JIT)
diff --git a/bytecode/ModuleProgramCodeBlock.cpp b/bytecode/ModuleProgramCodeBlock.cpp
new file mode 100644
index 0000000..3d54c3a
--- /dev/null
+++ b/bytecode/ModuleProgramCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ModuleProgramCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo ModuleProgramCodeBlock::s_info = {
+    "ModuleProgramCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(ModuleProgramCodeBlock)
+};
+
+void ModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<ModuleProgramCodeBlock*>(cell)->~ModuleProgramCodeBlock();
+}
+
+} // namespace JSC
diff --git a/bytecode/ModuleProgramCodeBlock.h b/bytecode/ModuleProgramCodeBlock.h
new file mode 100644
index 0000000..b3c3b11
--- /dev/null
+++ b/bytecode/ModuleProgramCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+
+namespace JSC {
+
+class ModuleProgramCodeBlock : public GlobalCodeBlock {
+public:
+    typedef GlobalCodeBlock Base;
+    DECLARE_INFO;
+
+    static ModuleProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+    {
+        ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+            ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static ModuleProgramCodeBlock* create(VM* vm, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+    {
+        ModuleProgramCodeBlock* instance = new (NotNull, allocateCell<ModuleProgramCodeBlock>(vm->heap))
+            ModuleProgramCodeBlock(vm, vm->moduleProgramCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, firstLineColumnOffset);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+private:
+    ModuleProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ModuleProgramCodeBlock& other)
+        : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    ModuleProgramCodeBlock(VM* vm, Structure* structure, ModuleProgramExecutable* ownerExecutable, UnlinkedModuleProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+        : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
+    {
+    }
+
+    static void destroy(JSCell*);
+};
+
+} // namespace JSC
diff --git a/bytecode/ObjectAllocationProfile.h b/bytecode/ObjectAllocationProfile.h
new file mode 100644
index 0000000..1bc6b15
--- /dev/null
+++ b/bytecode/ObjectAllocationProfile.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "VM.h"
+#include "JSGlobalObject.h"
+#include "ObjectPrototype.h"
+#include "SlotVisitor.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class ObjectAllocationProfile {
+    friend class LLIntOffsetsExtractor;
+public:
+    static ptrdiff_t offsetOfAllocator() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_allocator); }
+    static ptrdiff_t offsetOfStructure() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_structure); }
+    static ptrdiff_t offsetOfInlineCapacity() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_inlineCapacity); }
+
+    ObjectAllocationProfile()
+        : m_allocator(0)
+        , m_inlineCapacity(0)
+    {
+    }
+
+    bool isNull() { return !m_structure; }
+
+    void initialize(VM& vm, JSGlobalObject* globalObject, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity)
+    {
+        ASSERT(!m_allocator);
+        ASSERT(!m_structure);
+        ASSERT(!m_inlineCapacity);
+
+        unsigned inlineCapacity = 0;
+        if (inferredInlineCapacity < JSFinalObject::defaultInlineCapacity()) {
+            // Try to shrink the object based on static analysis.
+            inferredInlineCapacity += possibleDefaultPropertyCount(vm, prototype);
+
+            if (!inferredInlineCapacity) {
+                // Empty objects are rare, so most likely the static analyzer just didn't
+                // see the real initializer function. This can happen with helper functions.
+                inferredInlineCapacity = JSFinalObject::defaultInlineCapacity();
+            } else if (inferredInlineCapacity > JSFinalObject::defaultInlineCapacity()) {
+                // Default properties are weak guesses, so don't allow them to turn a small
+                // object into a large object.
+                inferredInlineCapacity = JSFinalObject::defaultInlineCapacity();
+            }
+
+            inlineCapacity = inferredInlineCapacity;
+            ASSERT(inlineCapacity < JSFinalObject::maxInlineCapacity());
+        } else {
+            // Normal or large object.
+            inlineCapacity = inferredInlineCapacity;
+            if (inlineCapacity > JSFinalObject::maxInlineCapacity())
+                inlineCapacity = JSFinalObject::maxInlineCapacity();
+        }
+
+        ASSERT(inlineCapacity > 0);
+        ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity());
+
+        size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
+        MarkedAllocator* allocator = vm.cellSpace.allocatorFor(allocationSize);
+        
+        // Take advantage of extra inline capacity available in the size class.
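+        // For example, if allocationSize falls between two size classes, the rounded-up cell
+        // leaves (cellSize - allocationSize) bytes of slop; that slop is converted into extra
+        // inline property slots (one per WriteBarrier-sized word) and is still clamped to
+        // JSFinalObject::maxInlineCapacity() below.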
+        if (allocator) {
+            size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>);
+            inlineCapacity += slop;
+            if (inlineCapacity > JSFinalObject::maxInlineCapacity())
+                inlineCapacity = JSFinalObject::maxInlineCapacity();
+        }
+
+        Structure* structure = vm.prototypeMap.emptyObjectStructureForPrototype(globalObject, prototype, inlineCapacity);
+
+        // Ensure that if another thread sees the structure, it will see it properly created
+        WTF::storeStoreFence();
+
+        m_allocator = allocator;
+        m_structure.set(vm, owner, structure);
+        m_inlineCapacity = inlineCapacity;
+    }
+
+    Structure* structure()
+    {
+        Structure* structure = m_structure.get();
+        // Ensure that if we see the structure, it has been properly created
+        WTF::loadLoadFence();
+        return structure;
+    }
+    unsigned inlineCapacity() { return m_inlineCapacity; }
+
+    void clear()
+    {
+        m_allocator = 0;
+        m_structure.clear();
+        m_inlineCapacity = 0;
+        ASSERT(isNull());
+    }
+
+    void visitAggregate(SlotVisitor& visitor)
+    {
+        visitor.append(m_structure);
+    }
+
+private:
+
+    unsigned possibleDefaultPropertyCount(VM& vm, JSObject* prototype)
+    {
+        if (prototype == prototype->globalObject()->objectPrototype())
+            return 0;
+
+        size_t count = 0;
+        PropertyNameArray propertyNameArray(&vm, PropertyNameMode::StringsAndSymbols);
+        prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, EnumerationMode());
+        PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector();
+        for (size_t i = 0; i < propertyNameVector.size(); ++i) {
+            JSValue value = prototype->getDirect(vm, propertyNameVector[i]);
+
+            // Functions are common, and are usually class-level objects that are not overridden.
+            if (jsDynamicCast<JSFunction*>(value))
+                continue;
+
+            ++count;
+        }
+        return count;
+    }
+
+    MarkedAllocator* m_allocator; // Precomputed to make things easier for generated code.
+    WriteBarrier<Structure> m_structure;
+    unsigned m_inlineCapacity;
+};
+
+} // namespace JSC
diff --git a/bytecode/ObjectPropertyCondition.cpp b/bytecode/ObjectPropertyCondition.cpp
new file mode 100644
index 0000000..f7d8ee8
--- /dev/null
+++ b/bytecode/ObjectPropertyCondition.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ObjectPropertyCondition.h"
+
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+void ObjectPropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!*this) {
+        out.print("");
+        return;
+    }
+    
+    out.print("<", inContext(JSValue(m_object), context), ": ", inContext(m_condition, context), ">");
+}
+
+void ObjectPropertyCondition::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint(
+    Structure* structure) const
+{
+    return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint() const
+{
+    if (!*this)
+        return false;
+    
+    return structureEnsuresValidityAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+    return m_condition.validityRequiresImpurePropertyWatchpoint(structure);
+}
+
+bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint() const
+{
+    if (!*this)
+        return false;
+    
+    return validityRequiresImpurePropertyWatchpoint(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(Structure* structure) const
+{
+    return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValidAssumingImpurePropertyWatchpoint() const
+{
+    if (!*this)
+        return false;
+
+    return isStillValidAssumingImpurePropertyWatchpoint(m_object->structure());
+}
+
+
+bool ObjectPropertyCondition::isStillValid(Structure* structure) const
+{
+    return m_condition.isStillValid(structure, m_object);
+}
+
+bool ObjectPropertyCondition::isStillValid() const
+{
+    if (!*this)
+        return false;
+    
+    return isStillValid(m_object->structure());
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity(Structure* structure) const
+{
+    return m_condition.isStillValid(structure);
+}
+
+bool ObjectPropertyCondition::structureEnsuresValidity() const
+{
+    if (!*this)
+        return false;
+    
+    return structureEnsuresValidity(m_object->structure());
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+    Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+    return m_condition.isWatchableAssumingImpurePropertyWatchpoint(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+    PropertyCondition::WatchabilityEffort effort) const
+{
+    if (!*this)
+        return false;
+    
+    return isWatchableAssumingImpurePropertyWatchpoint(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(
+    Structure* structure, PropertyCondition::WatchabilityEffort effort) const
+{
+    return m_condition.isWatchable(structure, m_object, effort);
+}
+
+bool ObjectPropertyCondition::isWatchable(PropertyCondition::WatchabilityEffort effort) const
+{
+    if (!*this)
+        return false;
+    
+    return isWatchable(m_object->structure(), effort);
+}
+
+bool ObjectPropertyCondition::isStillLive() const
+{
+    if (!*this)
+        return false;
+    
+    if (!Heap::isMarked(m_object))
+        return false;
+    
+    return m_condition.isStillLive();
+}
+
+void ObjectPropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+    if (!*this)
+        return;
+    
+    tracked.check(m_object);
+    m_condition.validateReferences(tracked);
+}
+
+ObjectPropertyCondition ObjectPropertyCondition::attemptToMakeEquivalenceWithoutBarrier() const
+{
+    PropertyCondition result = condition().attemptToMakeEquivalenceWithoutBarrier(object());
+    if (!result)
+        return ObjectPropertyCondition();
+    return ObjectPropertyCondition(object(), result);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/ObjectPropertyCondition.h b/bytecode/ObjectPropertyCondition.h
new file mode 100644
index 0000000..568736b
--- /dev/null
+++ b/bytecode/ObjectPropertyCondition.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include "PropertyCondition.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class ObjectPropertyCondition {
+public:
+    ObjectPropertyCondition()
+        : m_object(nullptr)
+    {
+    }
+    
+    ObjectPropertyCondition(WTF::HashTableDeletedValueType token)
+        : m_object(nullptr)
+        , m_condition(token)
+    {
+    }
+    
+    ObjectPropertyCondition(JSObject* object, const PropertyCondition& condition)
+        : m_object(object)
+        , m_condition(condition)
+    {
+    }
+    
+    static ObjectPropertyCondition presenceWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::presenceWithoutBarrier(uid, offset, attributes); 
+        return result;
+    }
+    
+    static ObjectPropertyCondition presence(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyOffset offset,
+        unsigned attributes)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return presenceWithoutBarrier(object, uid, offset, attributes);
+    }
+
+    // NOTE: The prototype is the storedPrototype, not the prototypeForLookup.
+    static ObjectPropertyCondition absenceWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::absenceWithoutBarrier(uid, prototype);
+        return result;
+    }
+    
+    static ObjectPropertyCondition absence(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceWithoutBarrier(object, uid, prototype);
+    }
+    
+    static ObjectPropertyCondition absenceOfSetterWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::absenceOfSetterWithoutBarrier(uid, prototype);
+        return result;
+    }
+    
+    static ObjectPropertyCondition absenceOfSetter(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceOfSetterWithoutBarrier(object, uid, prototype);
+    }
+    
+    static ObjectPropertyCondition equivalenceWithoutBarrier(
+        JSObject* object, UniquedStringImpl* uid, JSValue value)
+    {
+        ObjectPropertyCondition result;
+        result.m_object = object;
+        result.m_condition = PropertyCondition::equivalenceWithoutBarrier(uid, value);
+        return result;
+    }
+    
+    static ObjectPropertyCondition equivalence(
+        VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSValue value)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return equivalenceWithoutBarrier(object, uid, value);
+    }
+
+    explicit operator bool() const { return !!m_condition; }
+    
+    JSObject* object() const { return m_object; }
+    PropertyCondition condition() const { return m_condition; }
+    
+    PropertyCondition::Kind kind() const { return condition().kind(); }
+    UniquedStringImpl* uid() const { return condition().uid(); }
+    bool hasOffset() const { return condition().hasOffset(); }
+    PropertyOffset offset() const { return condition().offset(); }
+    unsigned hasAttributes() const { return condition().hasAttributes(); }
+    unsigned attributes() const { return condition().attributes(); }
+    bool hasPrototype() const { return condition().hasPrototype(); }
+    JSObject* prototype() const { return condition().prototype(); }
+    bool hasRequiredValue() const { return condition().hasRequiredValue(); }
+    JSValue requiredValue() const { return condition().requiredValue(); }
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+    
+    unsigned hash() const
+    {
+        return WTF::PtrHash<JSObject*>::hash(m_object) ^ m_condition.hash();
+    }
+    
+    bool operator==(const ObjectPropertyCondition& other) const
+    {
+        return m_object == other.m_object
+            && m_condition == other.m_condition;
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return !m_object && m_condition.isHashTableDeletedValue();
+    }
+    
+    // Two conditions are compatible if they are identical or if they speak of different uids or
+    // different objects. If false is returned, you have to decide how to resolve the conflict -
+    // for example if there is a Presence and an Equivalence then in some cases you'll want the
+    // more general of the two while in other cases you'll want the more specific of the two. This
+    // will also return false for contradictions, like Presence and Absence on the same
+    // object/uid. By convention, invalid conditions aren't compatible with anything.
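+    // For example, a Presence condition for "x" on object A is compatible with an Absence
+    // condition for "y" on A (different uids) and with a Presence condition for "x" on a
+    // different object B, but Presence and Absence of "x" on the same object contradict each
+    // other and are therefore not compatible.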
+    bool isCompatibleWith(const ObjectPropertyCondition& other) const
+    {
+        if (!*this || !other)
+            return false;
+        return *this == other || uid() != other.uid() || object() != other.object();
+    }
+    
+    // These validity-checking methods can optionally take a Structure* instead of loading the
+    // Structure* from the object. If you're in the concurrent JIT, then you must use the forms
+    // that take an explicit Structure* because you want the compiler to optimize for the same
+    // structure that you validated (i.e. avoid a TOCTOU race).
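+    //
+    // A minimal sketch of the concurrent-JIT pattern (illustrative):
+    //
+    //     Structure* structure = condition.object()->structure(); // load the structure once
+    //     if (condition.structureEnsuresValidity(structure)) {
+    //         // ... emit code specialized on 'structure'; do not re-load object->structure(),
+    //         // or the structure you compile against may differ from the one you checked.
+    //     }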
+    
+    // Checks if the object's structure claims that the property won't be intercepted. Validity
+    // does not require watchpoints on the object.
+    bool structureEnsuresValidityAssumingImpurePropertyWatchpoint(Structure*) const;
+    bool structureEnsuresValidityAssumingImpurePropertyWatchpoint() const;
+    
+    // Returns true if we need an impure property watchpoint to ensure validity even if
+    // isStillValidAccordingToStructure() returned true.
+    bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+    bool validityRequiresImpurePropertyWatchpoint() const;
+
+    // Checks if the condition still holds setting aside the need for an impure property watchpoint.
+    // Validity might still require watchpoints on the object.
+    bool isStillValidAssumingImpurePropertyWatchpoint(Structure*) const;
+    bool isStillValidAssumingImpurePropertyWatchpoint() const;
+
+    // Checks if the condition still holds. May conservatively return false, if the object and
+    // structure alone don't guarantee the condition. Note that this may return true if the
+    // condition still requires some watchpoints on the object in addition to checking the
+    // structure. If you want to check if the condition holds by using the structure alone,
+    // use structureEnsuresValidity().
+    bool isStillValid(Structure*) const;
+    bool isStillValid() const;
+    
+    // Shorthand for condition().isStillValid(structure).
+    bool structureEnsuresValidity(Structure*) const;
+    bool structureEnsuresValidity() const;
+    
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure and possibly an impure property watchpoint.
+    bool isWatchableAssumingImpurePropertyWatchpoint(
+        Structure*,
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+    bool isWatchableAssumingImpurePropertyWatchpoint(
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure.
+    bool isWatchable(
+        Structure*,
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+    bool isWatchable(
+        PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const;
+    
+    bool watchingRequiresStructureTransitionWatchpoint() const
+    {
+        return condition().watchingRequiresStructureTransitionWatchpoint();
+    }
+    bool watchingRequiresReplacementWatchpoint() const
+    {
+        return condition().watchingRequiresReplacementWatchpoint();
+    }
+    
+    // This means that the objects involved in this are still live.
+    bool isStillLive() const;
+    
+    void validateReferences(const TrackedReferences&) const;
+
+    bool isValidValueForPresence(JSValue value) const
+    {
+        return condition().isValidValueForPresence(value);
+    }
+
+    ObjectPropertyCondition attemptToMakeEquivalenceWithoutBarrier() const;
+
+private:
+    JSObject* m_object;
+    PropertyCondition m_condition;
+};
+
+struct ObjectPropertyConditionHash {
+    static unsigned hash(const ObjectPropertyCondition& key) { return key.hash(); }
+    static bool equal(
+        const ObjectPropertyCondition& a, const ObjectPropertyCondition& b)
+    {
+        return a == b;
+    }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::ObjectPropertyCondition> {
+    typedef JSC::ObjectPropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::ObjectPropertyCondition> : SimpleClassHashTraits<JSC::ObjectPropertyCondition> { };
+
+} // namespace WTF
diff --git a/bytecode/ObjectPropertyConditionSet.cpp b/bytecode/ObjectPropertyConditionSet.cpp
new file mode 100644
index 0000000..e2e4a8f
--- /dev/null
+++ b/bytecode/ObjectPropertyConditionSet.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ObjectPropertyConditionSet.h"
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forObject(JSObject* object) const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.object() == object)
+            return condition;
+    }
+    return ObjectPropertyCondition();
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::forConditionKind(
+    PropertyCondition::Kind kind) const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.kind() == kind)
+            return condition;
+    }
+    return ObjectPropertyCondition();
+}
+
+unsigned ObjectPropertyConditionSet::numberOfConditionsWithKind(PropertyCondition::Kind kind) const
+{
+    unsigned result = 0;
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.kind() == kind)
+            result++;
+    }
+    return result;
+}
+
+bool ObjectPropertyConditionSet::hasOneSlotBaseCondition() const
+{
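+    // Comparing the two booleans with != acts as a logical XOR: exactly one of the
+    // Presence/Equivalence counts must be 1.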
+    return (numberOfConditionsWithKind(PropertyCondition::Presence) == 1) != (numberOfConditionsWithKind(PropertyCondition::Equivalence) == 1);
+}
+
+ObjectPropertyCondition ObjectPropertyConditionSet::slotBaseCondition() const
+{
+    ObjectPropertyCondition result;
+    unsigned numFound = 0;
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.kind() == PropertyCondition::Presence
+            || condition.kind() == PropertyCondition::Equivalence) {
+            result = condition;
+            numFound++;
+        }
+    }
+    RELEASE_ASSERT(numFound == 1);
+    return result;
+}
+
+ObjectPropertyConditionSet ObjectPropertyConditionSet::mergedWith(
+    const ObjectPropertyConditionSet& other) const
+{
+    if (!isValid() || !other.isValid())
+        return invalid();
+
+    Vector<ObjectPropertyCondition> result;
+    
+    if (!isEmpty())
+        result.appendVector(m_data->vector);
+    
+    for (const ObjectPropertyCondition& newCondition : other) {
+        bool foundMatch = false;
+        for (const ObjectPropertyCondition& existingCondition : *this) {
+            if (newCondition == existingCondition) {
+                foundMatch = true;
+                continue;
+            }
+            if (!newCondition.isCompatibleWith(existingCondition))
+                return invalid();
+        }
+        if (!foundMatch)
+            result.append(newCondition);
+    }
+
+    return create(result);
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidity() const
+{
+    if (!isValid())
+        return false;
+    
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (!condition.structureEnsuresValidity())
+            return false;
+    }
+    return true;
+}
+
+bool ObjectPropertyConditionSet::structuresEnsureValidityAssumingImpurePropertyWatchpoint() const
+{
+    if (!isValid())
+        return false;
+    
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint())
+            return false;
+    }
+    return true;
+}
+
+bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (condition.validityRequiresImpurePropertyWatchpoint())
+            return true;
+    }
+    return false;
+}
+
+bool ObjectPropertyConditionSet::areStillLive() const
+{
+    for (const ObjectPropertyCondition& condition : *this) {
+        if (!condition.isStillLive())
+            return false;
+    }
+    return true;
+}
+
+void ObjectPropertyConditionSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!isValid()) {
+        out.print("");
+        return;
+    }
+    
+    out.print("[");
+    if (m_data)
+        out.print(listDumpInContext(m_data->vector, context));
+    out.print("]");
+}
+
+void ObjectPropertyConditionSet::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+bool ObjectPropertyConditionSet::isValidAndWatchable() const
+{
+    if (!isValid())
+        return false;
+
+    for (ObjectPropertyCondition condition : m_data->vector) {
+        if (!condition.isWatchable())
+            return false;
+    }
+    return true;
+}
+
+namespace {
+
+bool verbose = false;
+
+ObjectPropertyCondition generateCondition(
+    VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyCondition::Kind conditionKind)
+{
+    Structure* structure = object->structure();
+    if (verbose)
+        dataLog("Creating condition ", conditionKind, " for ", pointerDump(structure), "\n");
+
+    ObjectPropertyCondition result;
+    switch (conditionKind) {
+    case PropertyCondition::Presence: {
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (offset == invalidOffset)
+            return ObjectPropertyCondition();
+        result = ObjectPropertyCondition::presence(vm, owner, object, uid, offset, attributes);
+        break;
+    }
+    case PropertyCondition::Absence: {
+        result = ObjectPropertyCondition::absence(
+            vm, owner, object, uid, object->structure()->storedPrototypeObject());
+        break;
+    }
+    case PropertyCondition::AbsenceOfSetter: {
+        result = ObjectPropertyCondition::absenceOfSetter(
+            vm, owner, object, uid, object->structure()->storedPrototypeObject());
+        break;
+    }
+    case PropertyCondition::Equivalence: {
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (offset == invalidOffset)
+            return ObjectPropertyCondition();
+        JSValue value = object->getDirect(offset);
+        result = ObjectPropertyCondition::equivalence(vm, owner, object, uid, value);
+        break;
+    }
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return ObjectPropertyCondition();
+    }
+
+    if (!result.isStillValidAssumingImpurePropertyWatchpoint()) {
+        if (verbose)
+            dataLog("Failed to create condition: ", result, "\n");
+        return ObjectPropertyCondition();
+    }
+
+    if (verbose)
+        dataLog("New condition: ", result, "\n");
+    return result;
+}
+
+enum Concurrency {
+    MainThread,
+    Concurrent
+};
+template<typename Functor>
+ObjectPropertyConditionSet generateConditions(
+    VM& vm, JSGlobalObject* globalObject, Structure* structure, JSObject* prototype, const Functor& functor,
+    Concurrency concurrency = MainThread)
+{
+    Vector<ObjectPropertyCondition> conditions;
+    
+    for (;;) {
+        if (verbose)
+            dataLog("Considering structure: ", pointerDump(structure), "\n");
+        
+        if (structure->isProxy()) {
+            if (verbose)
+                dataLog("It's a proxy, so invalid.\n");
+            return ObjectPropertyConditionSet::invalid();
+        }
+        
+        JSValue value = structure->prototypeForLookup(globalObject);
+        
+        if (value.isNull()) {
+            if (!prototype) {
+                if (verbose)
+                    dataLog("Reached end of prototype chain as expected, done.\n");
+                break;
+            }
+            if (verbose)
+                dataLog("Unexpectedly reached end of prototype chain, so invalid.\n");
+            return ObjectPropertyConditionSet::invalid();
+        }
+        
+        JSObject* object = jsCast<JSObject*>(value);
+        structure = object->structure(vm);
+        
+        if (structure->isDictionary()) {
+            if (concurrency == MainThread) {
+                if (structure->hasBeenFlattenedBefore()) {
+                    if (verbose)
+                        dataLog("Dictionary has been flattened before, so invalid.\n");
+                    return ObjectPropertyConditionSet::invalid();
+                }
+
+                if (verbose)
+                    dataLog("Flattening ", pointerDump(structure));
+                structure->flattenDictionaryStructure(vm, object);
+            } else {
+                if (verbose)
+                    dataLog("Cannot flatten dictionary when not on main thread, so invalid.\n");
+                return ObjectPropertyConditionSet::invalid();
+            }
+        }
+
+        if (!functor(conditions, object)) {
+            if (verbose)
+                dataLog("Functor failed, invalid.\n");
+            return ObjectPropertyConditionSet::invalid();
+        }
+        
+        if (object == prototype) {
+            if (verbose)
+                dataLog("Reached desired prototype, done.\n");
+            break;
+        }
+    }
+
+    if (verbose)
+        dataLog("Returning conditions: ", listDump(conditions), "\n");
+    return ObjectPropertyConditionSet::create(conditions);
+}
+
+} // anonymous namespace
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, PropertyCondition::AbsenceOfSetter);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, prototype,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            PropertyCondition::Kind kind =
+                object == prototype ? PropertyCondition::Presence : PropertyCondition::Absence;
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, kind);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+    VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, exec->lexicalGlobalObject(), headStructure, prototype,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            if (object == prototype)
+                return true;
+            ObjectPropertyCondition result =
+                generateCondition(vm, owner, object, uid, PropertyCondition::Absence);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        });
+}
+
+ObjectPropertyConditionSet generateConditionsForPrototypeEquivalenceConcurrently(
+    VM& vm, JSGlobalObject* globalObject, Structure* headStructure, JSObject* prototype, UniquedStringImpl* uid)
+{
+    return generateConditions(vm, globalObject, headStructure, prototype,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            PropertyCondition::Kind kind =
+                object == prototype ? PropertyCondition::Equivalence : PropertyCondition::Absence;
+            ObjectPropertyCondition result = generateCondition(vm, nullptr, object, uid, kind);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        }, Concurrent);
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertyMissConcurrently(
+    VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, globalObject, headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result = generateCondition(vm, nullptr, object, uid, PropertyCondition::Absence);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        }, Concurrent);
+}
+
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+    VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid)
+{
+    return generateConditions(
+        vm, globalObject, headStructure, nullptr,
+        [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool {
+            ObjectPropertyCondition result =
+                generateCondition(vm, nullptr, object, uid, PropertyCondition::AbsenceOfSetter);
+            if (!result)
+                return false;
+            conditions.append(result);
+            return true;
+        }, Concurrent);
+}
+
+ObjectPropertyCondition generateConditionForSelfEquivalence(
+    VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid)
+{
+    return generateCondition(vm, owner, object, uid, PropertyCondition::Equivalence);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/ObjectPropertyConditionSet.h b/bytecode/ObjectPropertyConditionSet.h
new file mode 100644
index 0000000..2b15965
--- /dev/null
+++ b/bytecode/ObjectPropertyConditionSet.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include <wtf/RefPtr.h>
+#include <wtf/ThreadSafeRefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// An object property condition set is used to represent the set of additional conditions
+// that need to be met for some heap access to be valid. The set can have the following
+// interesting states:
+//
+// Empty: There are no special conditions that need to be met.
+// Invalid: The heap access is never valid.
+// Non-empty: The heap access is valid if all the ObjectPropertyConditions in the set are valid.
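+//
+// A minimal usage sketch (illustrative only; the surrounding calls are hypothetical,
+// but generateConditionsForPropertyMiss() is declared at the end of this header):
+//
+//     ObjectPropertyConditionSet set =
+//         generateConditionsForPropertyMiss(vm, owner, exec, headStructure, uid);
+//     if (!set.isValid())
+//         return; // Invalid: the access can never be proven safe this way.
+//     for (const ObjectPropertyCondition& condition : set)
+//         ...; // Non-empty: each condition must be checked or watched. An empty set skips this loop.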
+
+class ObjectPropertyConditionSet {
+public:
+    ObjectPropertyConditionSet() { }
+    
+    static ObjectPropertyConditionSet invalid()
+    {
+        ObjectPropertyConditionSet result;
+        result.m_data = adoptRef(new Data());
+        return result;
+    }
+    
+    static ObjectPropertyConditionSet create(const Vector<ObjectPropertyCondition>& vector)
+    {
+        if (vector.isEmpty())
+            return ObjectPropertyConditionSet();
+        
+        ObjectPropertyConditionSet result;
+        result.m_data = adoptRef(new Data());
+        result.m_data->vector = vector;
+        return result;
+    }
+    
+    bool isValid() const
+    {
+        return !m_data || !m_data->vector.isEmpty();
+    }
+
+    bool isValidAndWatchable() const;
+    
+    bool isEmpty() const
+    {
+        return !m_data;
+    }
+    
+    typedef const ObjectPropertyCondition* iterator;
+    
+    iterator begin() const
+    {
+        if (!m_data)
+            return nullptr;
+        return m_data->vector.begin();
+    }
+    iterator end() const
+    {
+        if (!m_data)
+            return nullptr;
+        return m_data->vector.end();
+    }
+    
+    ObjectPropertyCondition forObject(JSObject*) const;
+    ObjectPropertyCondition forConditionKind(PropertyCondition::Kind) const;
+
+    unsigned numberOfConditionsWithKind(PropertyCondition::Kind) const;
+
+    bool hasOneSlotBaseCondition() const;
+    
+    // If this is a condition set for a prototype hit, then this is guaranteed to return the
+    // condition on the prototype itself. This allows you to get the object, offset, and
+    // attributes for the prototype. This will RELEASE_ASSERT that there is exactly one Presence
+    // in the set, and it will return that presence.
+    ObjectPropertyCondition slotBaseCondition() const;
+    
+    // Attempt to create a new condition set by merging this one with the other one. This will
+    // fail if any of the conditions are incompatible with each other. When it fails, it returns
+    // invalid().
+    ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const;
+    
+    bool structuresEnsureValidity() const;
+    bool structuresEnsureValidityAssumingImpurePropertyWatchpoint() const;
+    
+    bool needImpurePropertyWatchpoint() const;
+    bool areStillLive() const;
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+    
+    // Helpers for using this in a union.
+    void* releaseRawPointer()
+    {
+        return static_cast<void*>(m_data.leakRef());
+    }
+    static ObjectPropertyConditionSet adoptRawPointer(void* rawPointer)
+    {
+        ObjectPropertyConditionSet result;
+        result.m_data = adoptRef(static_cast<Data*>(rawPointer));
+        return result;
+    }
+    static ObjectPropertyConditionSet fromRawPointer(void* rawPointer)
+    {
+        ObjectPropertyConditionSet result;
+        result.m_data = static_cast<Data*>(rawPointer);
+        return result;
+    }
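+
+    // Illustrative round trip (hypothetical caller): releaseRawPointer() leaks this
+    // set's reference into a void* that can be stored in a union, and
+    // adoptRawPointer() later reclaims ownership of that same reference:
+    //
+    //     void* raw = set.releaseRawPointer();
+    //     ...
+    //     ObjectPropertyConditionSet restored = ObjectPropertyConditionSet::adoptRawPointer(raw);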
+
+    // FIXME: Everything below here should be private, but cannot be because of a bug in VS.
+    
+    // Internally, this represents Invalid using a pointer to a Data that has an empty vector.
+    
+    // FIXME: This could be made more compact by having it internally use a vector that just has
+    // the non-uid portion of ObjectPropertyCondition, and then requiring that the callers of all
+    // of the APIs supply the uid.
+    
+    class Data : public ThreadSafeRefCounted<Data> {
+        WTF_MAKE_NONCOPYABLE(Data);
+        WTF_MAKE_FAST_ALLOCATED;
+        
+    public:
+        Data() { }
+        
+        Vector<ObjectPropertyCondition> vector;
+    };
+    
+private:
+    RefPtr<Data> m_data;
+};
+
+ObjectPropertyCondition generateConditionForSelfEquivalence(
+    VM&, JSCell* owner, JSObject* object, UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPropertyMiss(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMiss(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom(
+    VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid);
+
+ObjectPropertyConditionSet generateConditionsForPrototypeEquivalenceConcurrently(
+    VM&, JSGlobalObject*, Structure* headStructure, JSObject* prototype,
+    UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertyMissConcurrently(
+    VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently(
+    VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid);
+
+} // namespace JSC
diff --git a/bytecode/Opcode.cpp b/bytecode/Opcode.cpp
new file mode 100644
index 0000000..0d16dfc
--- /dev/null
+++ b/bytecode/Opcode.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Opcode.h"
+
+#include <wtf/PrintStream.h>
+
+#if ENABLE(OPCODE_STATS)
+#include <array>
+#include <wtf/DataLog.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+const char* const opcodeNames[] = {
+#define OPCODE_NAME_ENTRY(opcode, size) #opcode,
+    FOR_EACH_OPCODE_ID(OPCODE_NAME_ENTRY)
+#undef OPCODE_NAME_ENTRY
+};
+
+#if ENABLE(OPCODE_STATS)
+
+long long OpcodeStats::opcodeCounts[numOpcodeIDs];
+long long OpcodeStats::opcodePairCounts[numOpcodeIDs][numOpcodeIDs];
+int OpcodeStats::lastOpcode = -1;
+
+static OpcodeStats logger;
+
+OpcodeStats::OpcodeStats()
+{
+    for (int i = 0; i < numOpcodeIDs; ++i)
+        opcodeCounts[i] = 0;
+    
+    for (int i = 0; i < numOpcodeIDs; ++i)
+        for (int j = 0; j < numOpcodeIDs; ++j)
+            opcodePairCounts[i][j] = 0;
+}
+
+static int compareOpcodeIndices(const void* left, const void* right)
+{
+    long long leftValue = OpcodeStats::opcodeCounts[*(int*) left];
+    long long rightValue = OpcodeStats::opcodeCounts[*(int*) right];
+    
+    if (leftValue < rightValue)
+        return 1;
+    else if (leftValue > rightValue)
+        return -1;
+    else
+        return 0;
+}
+
+static int compareOpcodePairIndices(const void* left, const void* right)
+{
+    std::pair<int, int> leftPair = *(std::pair<int, int>*) left;
+    long long leftValue = OpcodeStats::opcodePairCounts[leftPair.first][leftPair.second];
+    std::pair<int, int> rightPair = *(std::pair<int, int>*) right;
+    long long rightValue = OpcodeStats::opcodePairCounts[rightPair.first][rightPair.second];
+    
+    if (leftValue < rightValue)
+        return 1;
+    else if (leftValue > rightValue)
+        return -1;
+    else
+        return 0;
+}
+
+OpcodeStats::~OpcodeStats()
+{
+    long long totalInstructions = 0;
+    for (int i = 0; i < numOpcodeIDs; ++i)
+        totalInstructions += opcodeCounts[i];
+    
+    long long totalInstructionPairs = 0;
+    for (int i = 0; i < numOpcodeIDs; ++i)
+        for (int j = 0; j < numOpcodeIDs; ++j)
+            totalInstructionPairs += opcodePairCounts[i][j];
+
+    std::array<int, numOpcodeIDs> sortedIndices;
+    for (int i = 0; i < numOpcodeIDs; ++i)
+        sortedIndices[i] = i;
+    qsort(sortedIndices.data(), numOpcodeIDs, sizeof(int), compareOpcodeIndices);
+    
+    std::pair<int, int> sortedPairIndices[numOpcodeIDs * numOpcodeIDs];
+    std::pair<int, int>* currentPairIndex = sortedPairIndices;
+    for (int i = 0; i < numOpcodeIDs; ++i)
+        for (int j = 0; j < numOpcodeIDs; ++j)
+            *(currentPairIndex++) = std::make_pair(i, j);
+    qsort(sortedPairIndices, numOpcodeIDs * numOpcodeIDs, sizeof(std::pair<int, int>), compareOpcodePairIndices);
+    
+    dataLogF("\nExecuted opcode statistics\n"); 
+    
+    dataLogF("Total instructions executed: %lld\n\n", totalInstructions);
+
+    dataLogF("All opcodes by frequency:\n\n");
+
+    for (int i = 0; i < numOpcodeIDs; ++i) {
+        int index = sortedIndices[i];
+        dataLogF("%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCounts[index], ((double) opcodeCounts[index]) / ((double) totalInstructions) * 100.0);    
+    }
+    
+    dataLogF("\n");
+    dataLogF("2-opcode sequences by frequency: %lld\n\n", totalInstructions);
+    
+    for (int i = 0; i < numOpcodeIDs * numOpcodeIDs; ++i) {
+        std::pair<int, int> indexPair = sortedPairIndices[i];
+        long long count = opcodePairCounts[indexPair.first][indexPair.second];
+        
+        if (!count)
+            break;
+        
+        dataLogF("%s%s %s:%s %lld %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), count, ((double) count) / ((double) totalInstructionPairs) * 100.0);
+    }
+    
+    dataLogF("\n");
+    dataLogF("Most common opcodes and sequences:\n");
+
+    for (int i = 0; i < numOpcodeIDs; ++i) {
+        int index = sortedIndices[i];
+        long long opcodeCount = opcodeCounts[index];
+        double opcodeProportion = ((double) opcodeCount) / ((double) totalInstructions);
+        if (opcodeProportion < 0.0001)
+            break;
+        dataLogF("\n%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCount, opcodeProportion * 100.0);
+
+        for (int j = 0; j < numOpcodeIDs * numOpcodeIDs; ++j) {
+            std::pair<int, int> indexPair = sortedPairIndices[j];
+            long long pairCount = opcodePairCounts[indexPair.first][indexPair.second];
+            double pairProportion = ((double) pairCount) / ((double) totalInstructionPairs);
+        
+            if (!pairCount || pairProportion < 0.0001 || pairProportion < opcodeProportion / 100)
+                break;
+
+            if (indexPair.first != index && indexPair.second != index)
+                continue;
+
+            dataLogF("    %s%s %s:%s %lld - %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), pairCount, pairProportion * 100.0);
+        }
+        
+    }
+    dataLogF("\n");
+}
+
+void OpcodeStats::recordInstruction(int opcode)
+{
+    opcodeCounts[opcode]++;
+    
+    if (lastOpcode != -1)
+        opcodePairCounts[lastOpcode][opcode]++;
+    
+    lastOpcode = opcode;
+}
+
+void OpcodeStats::resetLastInstruction()
+{
+    lastOpcode = -1;
+}
+
+#endif
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, OpcodeID opcode)
+{
+    out.print(opcodeNames[opcode]);
+}
+
+} // namespace WTF
diff --git a/bytecode/Opcode.h b/bytecode/Opcode.h
new file mode 100644
index 0000000..41c8509
--- /dev/null
+++ b/bytecode/Opcode.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2008, 2009, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Bytecodes.h"
+#include "LLIntOpcode.h"
+
+#include <algorithm>
+#include <string.h>
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+#define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \
+    FOR_EACH_BYTECODE_ID(macro) \
+    extension__
+
+#define FOR_EACH_CORE_OPCODE_ID(macro) \
+    FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ )
+
+#define FOR_EACH_OPCODE_ID(macro) \
+    FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION( \
+        macro, \
+        FOR_EACH_LLINT_OPCODE_EXTENSION(macro) \
+    )
+
+
+#define OPCODE_ID_ENUM(opcode, length) opcode,
+    enum OpcodeID : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) };
+#undef OPCODE_ID_ENUM
+
+const int maxOpcodeLength = 9;
+#if !ENABLE(JIT)
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#else
+const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS;
+#endif
+
+#define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
+    FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
+#undef OPCODE_ID_LENGTHS
+
+#define OPCODE_LENGTH(opcode) opcode##_length
+
+#define OPCODE_ID_LENGTH_MAP(opcode, length) length,
+    const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
+#undef OPCODE_ID_LENGTH_MAP
+
+#if COMPILER(GCC)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+#endif
+
+#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
+    FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
+#undef VERIFY_OPCODE_ID
+
+#if COMPILER(GCC)
+#pragma GCC diagnostic pop
+#endif
+
+#if ENABLE(COMPUTED_GOTO_OPCODES)
+typedef void* Opcode;
+#else
+typedef OpcodeID Opcode;
+#endif
+
+#define PADDING_STRING "                                "
+#define PADDING_STRING_LENGTH static_cast<unsigned>(strlen(PADDING_STRING))
+
+extern const char* const opcodeNames[];
+
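+// Returns a pointer into PADDING_STRING so that printing the opcode name followed by
+// the returned padding yields a column of at most 'width' characters (clamped to the
+// padding buffer). For example, padOpcodeName(op_jmp, 28) yields 22 spaces for the
+// 6-character name "op_jmp".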
+inline const char* padOpcodeName(OpcodeID op, unsigned width)
+{
+    unsigned pad = width - strlen(opcodeNames[op]);
+    pad = std::min(pad, PADDING_STRING_LENGTH);
+    return PADDING_STRING + PADDING_STRING_LENGTH - pad;
+}
+
+#undef PADDING_STRING_LENGTH
+#undef PADDING_STRING
+
+#if ENABLE(OPCODE_STATS)
+
+struct OpcodeStats {
+    OpcodeStats();
+    ~OpcodeStats();
+    static long long opcodeCounts[numOpcodeIDs];
+    static long long opcodePairCounts[numOpcodeIDs][numOpcodeIDs];
+    static int lastOpcode;
+    
+    static void recordInstruction(int opcode);
+    static void resetLastInstruction();
+};
+
+#endif
+
+inline size_t opcodeLength(OpcodeID opcode)
+{
+    switch (opcode) {
+#define OPCODE_ID_LENGTHS(id, length) case id: return OPCODE_LENGTH(id);
+         FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS)
+#undef OPCODE_ID_LENGTHS
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+    return 0;
+}
+
+inline bool isBranch(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_jmp:
+    case op_jtrue:
+    case op_jfalse:
+    case op_jeq_null:
+    case op_jneq_null:
+    case op_jneq_ptr:
+    case op_jless:
+    case op_jlesseq:
+    case op_jgreater:
+    case op_jgreatereq:
+    case op_jnless:
+    case op_jnlesseq:
+    case op_jngreater:
+    case op_jngreatereq:
+    case op_switch_imm:
+    case op_switch_char:
+    case op_switch_string:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isUnconditionalBranch(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_jmp:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isTerminal(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_ret:
+    case op_end:
+        return true;
+    default:
+        return false;
+    }
+}
+
+inline bool isThrow(OpcodeID opcodeID)
+{
+    switch (opcodeID) {
+    case op_throw:
+    case op_throw_static_error:
+        return true;
+    default:
+        return false;
+    }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::OpcodeID);
+
+} // namespace WTF
diff --git a/bytecode/Operands.h b/bytecode/Operands.h
new file mode 100644
index 0000000..1028798
--- /dev/null
+++ b/bytecode/Operands.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2011, 2012, 2013, 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallFrame.h"
+#include "JSObject.h"
+#include "VirtualRegister.h"
+
+#include <wtf/PrintStream.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+template<typename T> struct OperandValueTraits;
+
+enum OperandKind { ArgumentOperand, LocalOperand };
+
+enum OperandsLikeTag { OperandsLike };
+
+template<typename T>
+class Operands {
+public:
+    Operands() { }
+    
+    explicit Operands(size_t numArguments, size_t numLocals)
+    {
+        if (WTF::VectorTraits<T>::needsInitialization) {
+            m_arguments.resize(numArguments);
+            m_locals.resize(numLocals);
+        } else {
+            m_arguments.fill(T(), numArguments);
+            m_locals.fill(T(), numLocals);
+        }
+    }
+
+    explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue)
+    {
+        m_arguments.fill(initialValue, numArguments);
+        m_locals.fill(initialValue, numLocals);
+    }
+    
+    template<typename U>
+    explicit Operands(OperandsLikeTag, const Operands<U>& other)
+    {
+        m_arguments.fill(T(), other.numberOfArguments());
+        m_locals.fill(T(), other.numberOfLocals());
+    }
+    
+    size_t numberOfArguments() const { return m_arguments.size(); }
+    size_t numberOfLocals() const { return m_locals.size(); }
+    
+    T& argument(size_t idx) { return m_arguments[idx]; }
+    const T& argument(size_t idx) const { return m_arguments[idx]; }
+    
+    T& local(size_t idx) { return m_locals[idx]; }
+    const T& local(size_t idx) const { return m_locals[idx]; }
+    
+    template<OperandKind operandKind>
+    size_t sizeFor() const
+    {
+        if (operandKind == ArgumentOperand)
+            return numberOfArguments();
+        return numberOfLocals();
+    }
+    template<OperandKind operandKind>
+    T& atFor(size_t idx)
+    {
+        if (operandKind == ArgumentOperand)
+            return argument(idx);
+        return local(idx);
+    }
+    template<OperandKind operandKind>
+    const T& atFor(size_t idx) const
+    {
+        if (operandKind == ArgumentOperand)
+            return argument(idx);
+        return local(idx);
+    }
+    
+    void ensureLocals(size_t size)
+    {
+        if (size <= m_locals.size())
+            return;
+
+        size_t oldSize = m_locals.size();
+        m_locals.resize(size);
+        if (!WTF::VectorTraits<T>::needsInitialization) {
+            for (size_t i = oldSize; i < m_locals.size(); ++i)
+                m_locals[i] = T();
+        }
+    }
+
+    void ensureLocals(size_t size, const T& ensuredValue)
+    {
+        if (size <= m_locals.size())
+            return;
+
+        size_t oldSize = m_locals.size();
+        m_locals.resize(size);
+        for (size_t i = oldSize; i < m_locals.size(); ++i)
+            m_locals[i] = ensuredValue;
+    }
+    
+    void setLocal(size_t idx, const T& value)
+    {
+        ensureLocals(idx + 1);
+        
+        m_locals[idx] = value;
+    }
+    
+    T getLocal(size_t idx)
+    {
+        if (idx >= m_locals.size())
+            return T();
+        return m_locals[idx];
+    }
+    
+    void setArgumentFirstTime(size_t idx, const T& value)
+    {
+        ASSERT(m_arguments[idx] == T());
+        argument(idx) = value;
+    }
+    
+    void setLocalFirstTime(size_t idx, const T& value)
+    {
+        ASSERT(idx >= m_locals.size() || m_locals[idx] == T());
+        setLocal(idx, value);
+    }
+    
+    T& operand(int operand)
+    {
+        if (operandIsArgument(operand)) {
+            int argument = VirtualRegister(operand).toArgument();
+            return m_arguments[argument];
+        }
+
+        return m_locals[VirtualRegister(operand).toLocal()];
+    }
+
+    T& operand(VirtualRegister virtualRegister)
+    {
+        return operand(virtualRegister.offset());
+    }
+
+    const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+    const T& operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
+    
+    bool hasOperand(int operand) const
+    {
+        if (operandIsArgument(operand))
+            return true;
+        return static_cast<size_t>(VirtualRegister(operand).toLocal()) < numberOfLocals();
+    }
+    bool hasOperand(VirtualRegister reg) const
+    {
+        return hasOperand(reg.offset());
+    }
+    
+    void setOperand(int operand, const T& value)
+    {
+        if (operandIsArgument(operand)) {
+            int argument = VirtualRegister(operand).toArgument();
+            m_arguments[argument] = value;
+            return;
+        }
+        
+        setLocal(VirtualRegister(operand).toLocal(), value);
+    }
+    
+    void setOperand(VirtualRegister virtualRegister, const T& value)
+    {
+        setOperand(virtualRegister.offset(), value);
+    }
+
+    size_t size() const { return numberOfArguments() + numberOfLocals(); }
+    const T& at(size_t index) const
+    {
+        if (index < numberOfArguments())
+            return m_arguments[index];
+        return m_locals[index - numberOfArguments()];
+    }
+    T& at(size_t index)
+    {
+        if (index < numberOfArguments())
+            return m_arguments[index];
+        return m_locals[index - numberOfArguments()];
+    }
+    const T& operator[](size_t index) const { return at(index); }
+    T& operator[](size_t index) { return at(index); }
+
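+    // Flat indexing convention used by at(), operator[], and the helpers below:
+    // indices [0, numberOfArguments()) address the arguments in order, and any
+    // index i >= numberOfArguments() addresses local (i - numberOfArguments()).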
+    bool isArgument(size_t index) const { return index < numberOfArguments(); }
+    bool isVariable(size_t index) const { return !isArgument(index); }
+    int argumentForIndex(size_t index) const
+    {
+        return index;
+    }
+    int variableForIndex(size_t index) const
+    {
+        return index - m_arguments.size();
+    }
+    int operandForIndex(size_t index) const
+    {
+        if (index < numberOfArguments())
+            return virtualRegisterForArgument(index).offset();
+        return virtualRegisterForLocal(index - numberOfArguments()).offset();
+    }
+    VirtualRegister virtualRegisterForIndex(size_t index) const
+    {
+        return VirtualRegister(operandForIndex(index));
+    }
+    size_t indexForOperand(int operand) const
+    {
+        if (operandIsArgument(operand))
+            return static_cast<size_t>(VirtualRegister(operand).toArgument());
+        return static_cast<size_t>(VirtualRegister(operand).toLocal()) + numberOfArguments();
+    }
+    size_t indexForOperand(VirtualRegister reg) const
+    {
+        return indexForOperand(reg.offset());
+    }
+    
+    void setOperandFirstTime(int operand, const T& value)
+    {
+        if (operandIsArgument(operand)) {
+            setArgumentFirstTime(VirtualRegister(operand).toArgument(), value);
+            return;
+        }
+        
+        setLocalFirstTime(VirtualRegister(operand).toLocal(), value);
+    }
+    
+    void fill(T value)
+    {
+        for (size_t i = 0; i < m_arguments.size(); ++i)
+            m_arguments[i] = value;
+        for (size_t i = 0; i < m_locals.size(); ++i)
+            m_locals[i] = value;
+    }
+    
+    void clear()
+    {
+        fill(T());
+    }
+    
+    bool operator==(const Operands& other) const
+    {
+        ASSERT(numberOfArguments() == other.numberOfArguments());
+        ASSERT(numberOfLocals() == other.numberOfLocals());
+        
+        return m_arguments == other.m_arguments && m_locals == other.m_locals;
+    }
+    
+    void dumpInContext(PrintStream& out, DumpContext* context) const;
+    void dump(PrintStream& out) const;
+    
+private:
+    Vector<T> m_arguments;
+    Vector<T> m_locals;
+};
+
+} // namespace JSC
diff --git a/bytecode/OperandsInlines.h b/bytecode/OperandsInlines.h
new file mode 100644
index 0000000..65fedda
--- /dev/null
+++ b/bytecode/OperandsInlines.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Operands.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+template<typename T>
+void Operands::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    CommaPrinter comma(" ");
+    for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
+        if (!argument(argumentIndex))
+            continue;
+        out.print(comma, "arg", argumentIndex, ":", inContext(argument(argumentIndex), context));
+    }
+    for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
+        if (!local(localIndex))
+            continue;
+        out.print(comma, "loc", localIndex, ":", inContext(local(localIndex), context));
+    }
+}
+
+template<typename T>
+void Operands::dump(PrintStream& out) const
+{
+    CommaPrinter comma(" ");
+    for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) {
+        if (!argument(argumentIndex))
+            continue;
+        out.print(comma, "arg", argumentIndex, ":", argument(argumentIndex));
+    }
+    for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) {
+        if (!local(localIndex))
+            continue;
+        out.print(comma, "loc", localIndex, ":", local(localIndex));
+    }
+}
+
+} // namespace JSC
diff --git a/bytecode/PolymorphicAccess.cpp b/bytecode/PolymorphicAccess.cpp
new file mode 100644
index 0000000..fae6081
--- /dev/null
+++ b/bytecode/PolymorphicAccess.cpp
@@ -0,0 +1,2061 @@
+/*
+ * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PolymorphicAccess.h"
+
+#if ENABLE(JIT)
+
+#include "BinarySwitch.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+#include "DOMJITAccessCasePatchpointParams.h"
+#include "DOMJITCallDOMGetterPatchpoint.h"
+#include "DirectArguments.h"
+#include "GetterSetter.h"
+#include "Heap.h"
+#include "JITOperations.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "ScopedArguments.h"
+#include "ScratchRegisterAllocator.h"
+#include "StructureStubClearingWatchpoint.h"
+#include "StructureStubInfo.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+void AccessGenerationResult::dump(PrintStream& out) const
+{
+    out.print(m_kind);
+    if (m_code)
+        out.print(":", m_code);
+}
+
+Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
+{
+    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+        watchpoints, jit->codeBlock(), stubInfo, condition);
+}
+
+void AccessGenerationState::restoreScratch()
+{
+    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
+}
+
+void AccessGenerationState::succeed()
+{
+    restoreScratch();
+    success.append(jit->jump());
+}
+
+const RegisterSet& AccessGenerationState::liveRegistersForCall()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+    return m_liveRegistersForCall;
+}
+
+const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+}
+
+static RegisterSet calleeSaveRegisters()
+{
+    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
+    result.filter(RegisterSet::registersToNotSaveForCCall());
+    return result;
+}
+
+const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling) {
+        m_calculatedRegistersForCallAndExceptionHandling = true;
+
+        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
+        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
+        if (m_needsToRestoreRegistersIfException)
+            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
+
+        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
+        m_liveRegistersForCall.exclude(calleeSaveRegisters());
+    }
+    return m_liveRegistersForCall;
+}
+
+auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
+{
+    RegisterSet liveRegisters = liveRegistersForCall();
+    liveRegisters.merge(extra);
+    
+    unsigned extraStackPadding = 0;
+    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
+    return SpillState {
+        WTFMove(liveRegisters),
+        numberOfStackBytesUsedForRegisterPreservation
+    };
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
+{
+    // Even if we're a getter, we don't want to ignore the result value like we normally do
+    // because the getter threw, and therefore, didn't return a value that means anything.
+    // Instead, we want to restore that register to what it was upon entering the getter
+    // inline cache. The subtlety here is if the base and the result are the same register,
+    // and the getter threw, we want OSR exit to see the original base value, not the result
+    // of the getter call.
+    RegisterSet dontRestore = spillState.spilledRegisters;
+    // As an optimization here, we only need to restore what is live for exception handling.
+    // We can construct the dontRestore set to accomplish this goal by having it contain only
+    // what is live for call but not live for exception handling. By ignoring things that are
+    // only live at the call but not the exception handler, we will only restore things live
+    // at the exception handler.
+    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
+    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+}
+
+void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
+{
+    unsigned extraStackPadding = 0;
+    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
+}
+
+CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+
+    if (!m_calculatedCallSiteIndex) {
+        m_calculatedCallSiteIndex = true;
+
+        if (m_needsToRestoreRegistersIfException)
+            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
+        else
+            m_callSiteIndex = originalCallSiteIndex();
+    }
+
+    return m_callSiteIndex;
+}
+
+const HandlerInfo& AccessGenerationState::originalExceptionHandler()
+{
+    if (!m_calculatedRegistersForCallAndExceptionHandling)
+        calculateLiveRegistersForCallAndExceptionHandling();
+
+    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
+    RELEASE_ASSERT(exceptionHandler);
+    return *exceptionHandler;
+}
+
+CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
+
+void AccessGenerationState::emitExplicitExceptionHandler()
+{
+    restoreScratch();
+    jit->copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    if (needsToRestoreRegistersIfException()) {
+        // The JIT that produced the original exception handling call site
+        // expects the OSR exit to be arrived at from genericUnwind. Therefore
+        // we must model what genericUnwind does here, i.e., set
+        // callFrameForCatch and copy the callee saves.
+
+        jit->storePtr(GPRInfo::callFrameRegister, jit->vm()->addressOfCallFrameForCatch());
+        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();
+
+        // We don't need to insert a new exception handler in the table
+        // because we're doing a manual exception check here, i.e., we'll
+        // never arrive here from genericUnwind().
+        HandlerInfo originalHandler = originalExceptionHandler();
+        jit->addLinkTask(
+            [=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
+            });
+    } else {
+        jit->setupArguments(CCallHelpers::TrustedImmPtr(jit->vm()), GPRInfo::callFrameRegister);
+        CCallHelpers::Call lookupExceptionHandlerCall = jit->call();
+        jit->addLinkTask(
+            [=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
+            });
+        jit->jumpToExceptionHandler();
+    }
+}
+
+AccessCase::AccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> AccessCase::tryGet(
+    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
+    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = type;
+    result->m_offset = offset;
+    result->m_structure.set(vm, owner, structure);
+    result->m_conditionSet = conditionSet;
+
+    if (viaProxy || additionalSet) {
+        result->m_rareData = std::make_unique<RareData>();
+        result->m_rareData->viaProxy = viaProxy;
+        result->m_rareData->additionalSet = additionalSet;
+    }
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::get(
+    VM& vm, JSCell* owner, AccessType type, PropertyOffset offset, Structure* structure,
+    const ObjectPropertyConditionSet& conditionSet, bool viaProxy, WatchpointSet* additionalSet,
+    PropertySlot::GetValueFunc customGetter, JSObject* customSlotBase, DOMJIT::GetterSetter* domJIT)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = type;
+    result->m_offset = offset;
+    result->m_structure.set(vm, owner, structure);
+    result->m_conditionSet = conditionSet;
+
+    if (viaProxy || additionalSet || result->doesCalls() || customGetter || customSlotBase || domJIT) {
+        result->m_rareData = std::make_unique<RareData>();
+        result->m_rareData->viaProxy = viaProxy;
+        result->m_rareData->additionalSet = additionalSet;
+        result->m_rareData->customAccessor.getter = customGetter;
+        result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
+        result->m_rareData->domJIT = domJIT;
+    }
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::megamorphicLoad(VM& vm, JSCell* owner)
+{
+    UNUSED_PARAM(vm);
+    UNUSED_PARAM(owner);
+    
+    if (GPRInfo::numberOfRegisters < 9)
+        return nullptr;
+    
+    std::unique_ptr<AccessCase> result(new AccessCase());
+    
+    result->m_type = MegamorphicLoad;
+    
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::replace(
+    VM& vm, JSCell* owner, Structure* structure, PropertyOffset offset)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = Replace;
+    result->m_offset = offset;
+    result->m_structure.set(vm, owner, structure);
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::transition(
+    VM& vm, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet)
+{
+    RELEASE_ASSERT(oldStructure == newStructure->previousID());
+
+    // Skip optimizing the case where we need a realloc, if we don't have
+    // enough registers to make it happen.
+    if (GPRInfo::numberOfRegisters < 6
+        && oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
+        && oldStructure->outOfLineCapacity()) {
+        return nullptr;
+    }
+
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = Transition;
+    result->m_offset = offset;
+    result->m_structure.set(vm, owner, newStructure);
+    result->m_conditionSet = conditionSet;
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::setter(
+    VM& vm, JSCell* owner, AccessType type, Structure* structure, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet, PutPropertySlot::PutValueFunc customSetter,
+    JSObject* customSlotBase)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = type;
+    result->m_offset = offset;
+    result->m_structure.set(vm, owner, structure);
+    result->m_conditionSet = conditionSet;
+    result->m_rareData = std::make_unique<RareData>();
+    result->m_rareData->customAccessor.setter = customSetter;
+    result->m_rareData->customSlotBase.setMayBeNull(vm, owner, customSlotBase);
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::in(
+    VM& vm, JSCell* owner, AccessType type, Structure* structure,
+    const ObjectPropertyConditionSet& conditionSet)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = type;
+    result->m_structure.set(vm, owner, structure);
+    result->m_conditionSet = conditionSet;
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::getLength(VM&, JSCell*, AccessType type)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = type;
+
+    return result;
+}
+
+std::unique_ptr<AccessCase> AccessCase::getIntrinsic(
+    VM& vm, JSCell* owner, JSFunction* getter, PropertyOffset offset,
+    Structure* structure, const ObjectPropertyConditionSet& conditionSet)
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+
+    result->m_type = IntrinsicGetter;
+    result->m_structure.set(vm, owner, structure);
+    result->m_conditionSet = conditionSet;
+    result->m_offset = offset;
+
+    result->m_rareData = std::make_unique<RareData>();
+    result->m_rareData->intrinsicFunction.set(vm, owner, getter);
+
+    return result;
+}
+
+AccessCase::~AccessCase()
+{
+}
+
+std::unique_ptr<AccessCase> AccessCase::fromStructureStubInfo(
+    VM& vm, JSCell* owner, StructureStubInfo& stubInfo)
+{
+    switch (stubInfo.cacheType) {
+    case CacheType::GetByIdSelf:
+        return get(
+            vm, owner, Load, stubInfo.u.byIdSelf.offset,
+            stubInfo.u.byIdSelf.baseObjectStructure.get());
+
+    case CacheType::PutByIdReplace:
+        return replace(
+            vm, owner, stubInfo.u.byIdSelf.baseObjectStructure.get(), stubInfo.u.byIdSelf.offset);
+
+    default:
+        return nullptr;
+    }
+}
+
+std::unique_ptr<AccessCase> AccessCase::clone() const
+{
+    std::unique_ptr<AccessCase> result(new AccessCase());
+    result->m_type = m_type;
+    result->m_offset = m_offset;
+    result->m_structure = m_structure;
+    result->m_conditionSet = m_conditionSet;
+    if (RareData* rareData = m_rareData.get()) {
+        result->m_rareData = std::make_unique<RareData>();
+        result->m_rareData->viaProxy = rareData->viaProxy;
+        result->m_rareData->additionalSet = rareData->additionalSet;
+        // NOTE: We don't copy the callLinkInfo, since that's created during code generation.
+        result->m_rareData->customAccessor.opaque = rareData->customAccessor.opaque;
+        result->m_rareData->customSlotBase = rareData->customSlotBase;
+        result->m_rareData->intrinsicFunction = rareData->intrinsicFunction;
+        result->m_rareData->domJIT = rareData->domJIT;
+    }
+    return result;
+}
+
+Vector<WatchpointSet*> AccessCase::commit(VM& vm, const Identifier& ident)
+{
+    // It's fine to commit something that is already committed. That arises when we switch to using
+    // newly allocated watchpoints. When it happens, it's not efficient - but we think that's OK
+    // because most AccessCases have no extra watchpoints anyway.
+    RELEASE_ASSERT(m_state == Primordial || m_state == Committed);
+    
+    Vector<WatchpointSet*> result;
+    
+    if ((structure() && structure()->needImpurePropertyWatchpoint())
+        || m_conditionSet.needImpurePropertyWatchpoint())
+        result.append(vm.ensureWatchpointSetForImpureProperty(ident));
+
+    if (additionalSet())
+        result.append(additionalSet());
+    
+    m_state = Committed;
+    
+    return result;
+}
+
+bool AccessCase::guardedByStructureCheck() const
+{
+    if (viaProxy())
+        return false;
+
+    switch (m_type) {
+    case MegamorphicLoad:
+    case ArrayLength:
+    case StringLength:
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+        return false;
+    default:
+        return true;
+    }
+}
+
+JSObject* AccessCase::alternateBase() const
+{
+    if (customSlotBase())
+        return customSlotBase();
+    return conditionSet().slotBaseCondition().object();
+}
+
+bool AccessCase::doesCalls(Vector<JSCell*>* cellsToMark) const
+{
+    switch (type()) {
+    case Getter:
+    case Setter:
+    case CustomValueGetter:
+    case CustomAccessorGetter:
+    case CustomValueSetter:
+    case CustomAccessorSetter:
+        return true;
+    case Transition:
+        if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()
+            && structure()->couldHaveIndexingHeader()) {
+            if (cellsToMark)
+                cellsToMark->append(newStructure());
+            return true;
+        }
+        return false;
+    default:
+        return false;
+    }
+}
+
+bool AccessCase::couldStillSucceed() const
+{
+    return m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint();
+}
+
+bool AccessCase::canBeReplacedByMegamorphicLoad() const
+{
+    if (type() == MegamorphicLoad)
+        return true;
+    
+    return type() == Load
+        && !viaProxy()
+        && conditionSet().isEmpty()
+        && !additionalSet()
+        && !customSlotBase();
+}
+
+bool AccessCase::canReplace(const AccessCase& other) const
+{
+    // This puts in a good effort to try to figure out if 'other' is made superfluous by '*this'.
+    // It's fine for this to return false if it's in doubt.
+
+    switch (type()) {
+    case MegamorphicLoad:
+        return other.canBeReplacedByMegamorphicLoad();
+    case ArrayLength:
+    case StringLength:
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+        return other.type() == type();
+    default:
+        if (!guardedByStructureCheck() || !other.guardedByStructureCheck())
+            return false;
+        
+        return structure() == other.structure();
+    }
+}
+
+void AccessCase::dump(PrintStream& out) const
+{
+    out.print(m_type, ":(");
+
+    CommaPrinter comma;
+    
+    out.print(comma, m_state);
+
+    if (m_type == Transition)
+        out.print(comma, "structure = ", pointerDump(structure()), " -> ", pointerDump(newStructure()));
+    else if (m_structure)
+        out.print(comma, "structure = ", pointerDump(m_structure.get()));
+
+    if (isValidOffset(m_offset))
+        out.print(comma, "offset = ", m_offset);
+    if (!m_conditionSet.isEmpty())
+        out.print(comma, "conditions = ", m_conditionSet);
+
+    if (RareData* rareData = m_rareData.get()) {
+        if (rareData->viaProxy)
+            out.print(comma, "viaProxy = ", rareData->viaProxy);
+        if (rareData->additionalSet)
+            out.print(comma, "additionalSet = ", RawPointer(rareData->additionalSet.get()));
+        if (rareData->callLinkInfo)
+            out.print(comma, "callLinkInfo = ", RawPointer(rareData->callLinkInfo.get()));
+        if (rareData->customAccessor.opaque)
+            out.print(comma, "customAccessor = ", RawPointer(rareData->customAccessor.opaque));
+        if (rareData->customSlotBase)
+            out.print(comma, "customSlotBase = ", RawPointer(rareData->customSlotBase.get()));
+    }
+
+    out.print(")");
+}
+
+bool AccessCase::visitWeak(VM& vm) const
+{
+    if (m_structure && !Heap::isMarked(m_structure.get()))
+        return false;
+    if (!m_conditionSet.areStillLive())
+        return false;
+    if (m_rareData) {
+        if (m_rareData->callLinkInfo)
+            m_rareData->callLinkInfo->visitWeak(vm);
+        if (m_rareData->customSlotBase && !Heap::isMarked(m_rareData->customSlotBase.get()))
+            return false;
+        if (m_rareData->intrinsicFunction && !Heap::isMarked(m_rareData->intrinsicFunction.get()))
+            return false;
+    }
+    return true;
+}
+
+bool AccessCase::propagateTransitions(SlotVisitor& visitor) const
+{
+    bool result = true;
+    
+    if (m_structure)
+        result &= m_structure->markIfCheap(visitor);
+    
+    switch (m_type) {
+    case Transition:
+        if (Heap::isMarkedConcurrently(m_structure->previousID()))
+            visitor.appendUnbarriered(m_structure.get());
+        else
+            result = false;
+        break;
+    default:
+        break;
+    }
+    
+    return result;
+}
+
+void AccessCase::generateWithGuard(
+    AccessGenerationState& state, CCallHelpers::JumpList& fallThrough)
+{
+    SuperSamplerScope superSamplerScope(false);
+
+    RELEASE_ASSERT(m_state == Committed);
+    m_state = Generated;
+    
+    CCallHelpers& jit = *state.jit;
+    VM& vm = *jit.vm();
+    const Identifier& ident = *state.ident;
+    StructureStubInfo& stubInfo = *state.stubInfo;
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+    GPRReg scratchGPR = state.scratchGPR;
+    
+    UNUSED_PARAM(vm);
+
+    switch (m_type) {
+    case ArrayLength: {
+        ASSERT(!viaProxy());
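+        // The fast array-length load only applies to genuine JSArrays that have an indexing shape;
+        // everything else falls through to the next case.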
+        jit.load8(CCallHelpers::Address(baseGPR, JSCell::indexingTypeAndMiscOffset()), scratchGPR);
+        fallThrough.append(
+            jit.branchTest32(
+                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IsArray)));
+        fallThrough.append(
+            jit.branchTest32(
+                CCallHelpers::Zero, scratchGPR, CCallHelpers::TrustedImm32(IndexingShapeMask)));
+        break;
+    }
+
+    case StringLength: {
+        ASSERT(!viaProxy());
+        fallThrough.append(
+            jit.branch8(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                CCallHelpers::TrustedImm32(StringType)));
+        break;
+    }
+        
+    case DirectArgumentsLength: {
+        ASSERT(!viaProxy());
+        fallThrough.append(
+            jit.branch8(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                CCallHelpers::TrustedImm32(DirectArgumentsType)));
+
+        fallThrough.append(
+            jit.branchTestPtr(
+                CCallHelpers::NonZero,
+                CCallHelpers::Address(baseGPR, DirectArguments::offsetOfOverrides())));
+        jit.load32(
+            CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
+            valueRegs.payloadGPR());
+        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+        state.succeed();
+        return;
+    }
+        
+    case ScopedArgumentsLength: {
+        ASSERT(!viaProxy());
+        fallThrough.append(
+            jit.branch8(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                CCallHelpers::TrustedImm32(ScopedArgumentsType)));
+
+        fallThrough.append(
+            jit.branchTest8(
+                CCallHelpers::NonZero,
+                CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfOverrodeThings())));
+        jit.load32(
+            CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
+            valueRegs.payloadGPR());
+        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+        state.succeed();
+        return;
+    }
+        
+    case MegamorphicLoad: {
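+        // Probe the structure's property table inline: hash the identifier, walk the index vector,
+        // and load the property at the matching entry's offset. Misses and accessor properties fall
+        // through to the generic path; a missing property table bails out without repatching.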
+        UniquedStringImpl* key = ident.impl();
+        unsigned hash = IdentifierRepHash::hash(key);
+        
+        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+        allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+        allocator.lock(valueRegs);
+        allocator.lock(scratchGPR);
+        
+        GPRReg intermediateGPR = scratchGPR;
+        GPRReg maskGPR = allocator.allocateScratchGPR();
+        GPRReg maskedHashGPR = allocator.allocateScratchGPR();
+        GPRReg indexGPR = allocator.allocateScratchGPR();
+        GPRReg offsetGPR = allocator.allocateScratchGPR();
+        
+        if (verbose) {
+            dataLog("baseGPR = ", baseGPR, "\n");
+            dataLog("valueRegs = ", valueRegs, "\n");
+            dataLog("scratchGPR = ", scratchGPR, "\n");
+            dataLog("intermediateGPR = ", intermediateGPR, "\n");
+            dataLog("maskGPR = ", maskGPR, "\n");
+            dataLog("maskedHashGPR = ", maskedHashGPR, "\n");
+            dataLog("indexGPR = ", indexGPR, "\n");
+            dataLog("offsetGPR = ", offsetGPR, "\n");
+        }
+
+        ScratchRegisterAllocator::PreservedState preservedState =
+            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+
+        CCallHelpers::JumpList myFailAndIgnore;
+        CCallHelpers::JumpList myFallThrough;
+        
+        jit.emitLoadStructure(baseGPR, intermediateGPR, maskGPR);
+        jit.loadPtr(
+            CCallHelpers::Address(intermediateGPR, Structure::propertyTableUnsafeOffset()),
+            intermediateGPR);
+        
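+        // If this structure has no materialized property table, we cannot probe it inline; bail out
+        // to the slow path without repatching.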
+        myFailAndIgnore.append(jit.branchTestPtr(CCallHelpers::Zero, intermediateGPR));
+        
+        jit.load32(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexMask()), maskGPR);
+        jit.loadPtr(CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndex()), indexGPR);
+        jit.load32(
+            CCallHelpers::Address(intermediateGPR, PropertyTable::offsetOfIndexSize()),
+            intermediateGPR);
+
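+        // maskedHash = hash & indexMask is the initial probe slot. The PropertyMapEntry array starts
+        // right after the index vector, so advance intermediateGPR by indexSize * sizeof(uint32_t).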
+        jit.move(maskGPR, maskedHashGPR);
+        jit.and32(CCallHelpers::TrustedImm32(hash), maskedHashGPR);
+        jit.lshift32(CCallHelpers::TrustedImm32(2), intermediateGPR);
+        jit.addPtr(indexGPR, intermediateGPR);
+        
+        CCallHelpers::Label loop = jit.label();
+        
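+        // Each probe loads the entry index at index[maskedHash]; EmptyEntryIndex means the property
+        // is not present on this structure.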
+        jit.load32(CCallHelpers::BaseIndex(indexGPR, maskedHashGPR, CCallHelpers::TimesFour), offsetGPR);
+        
+        myFallThrough.append(
+            jit.branch32(
+                CCallHelpers::Equal,
+                offsetGPR,
+                CCallHelpers::TrustedImm32(PropertyTable::EmptyEntryIndex)));
+        
+        jit.sub32(CCallHelpers::TrustedImm32(1), offsetGPR);
+        jit.mul32(CCallHelpers::TrustedImm32(sizeof(PropertyMapEntry)), offsetGPR, offsetGPR);
+        jit.addPtr(intermediateGPR, offsetGPR);
+        
+        CCallHelpers::Jump collision =  jit.branchPtr(
+            CCallHelpers::NotEqual,
+            CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, key)),
+            CCallHelpers::TrustedImmPtr(key));
+        
+        // offsetGPR currently holds a pointer to the PropertyMapEntry, which has the offset and attributes.
+        // Check them and then attempt the load.
+        
+        myFallThrough.append(
+            jit.branchTest32(
+                CCallHelpers::NonZero,
+                CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, attributes)),
+                CCallHelpers::TrustedImm32(Accessor | CustomAccessor)));
+        
+        jit.load32(CCallHelpers::Address(offsetGPR, OBJECT_OFFSETOF(PropertyMapEntry, offset)), offsetGPR);
+        
+        jit.loadProperty(baseGPR, offsetGPR, valueRegs);
+        
+        allocator.restoreReusedRegistersByPopping(jit, preservedState);
+        state.succeed();
+        
+        collision.link(&jit);
+
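+        // Key mismatch: linearly probe the next slot, wrapping around via the mask below.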
+        jit.add32(CCallHelpers::TrustedImm32(1), maskedHashGPR);
+        
+        // FIXME: We could be smarter about this. Currently we're burning a GPR for the mask. But looping
+        // around isn't super common so we could, for example, recompute the mask from the difference between
+        // the table and index. But before we do that we should probably make it easier to multiply and
+        // divide by the size of PropertyMapEntry. That probably involves making PropertyMapEntry be arranged
+        // to have a power-of-2 size.
+        jit.and32(maskGPR, maskedHashGPR);
+        jit.jump().linkTo(loop, &jit);
+        
+        if (allocator.didReuseRegisters()) {
+            myFailAndIgnore.link(&jit);
+            allocator.restoreReusedRegistersByPopping(jit, preservedState);
+            state.failAndIgnore.append(jit.jump());
+            
+            myFallThrough.link(&jit);
+            allocator.restoreReusedRegistersByPopping(jit, preservedState);
+            fallThrough.append(jit.jump());
+        } else {
+            state.failAndIgnore.append(myFailAndIgnore);
+            fallThrough.append(myFallThrough);
+        }
+        return;
+    }
+
+    default: {
+        if (viaProxy()) {
+            fallThrough.append(
+                jit.branch8(
+                    CCallHelpers::NotEqual,
+                    CCallHelpers::Address(baseGPR, JSCell::typeInfoTypeOffset()),
+                    CCallHelpers::TrustedImm32(PureForwardingProxyType)));
+
+            jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
+
+            fallThrough.append(
+                jit.branchStructure(
+                    CCallHelpers::NotEqual,
+                    CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
+                    structure()));
+        } else {
+            fallThrough.append(
+                jit.branchStructure(
+                    CCallHelpers::NotEqual,
+                    CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
+                    structure()));
+        }
+        break;
+    } };
+
+    generateImpl(state);
+}
+
+void AccessCase::generate(AccessGenerationState& state)
+{
+    RELEASE_ASSERT(m_state == Committed);
+    m_state = Generated;
+    
+    generateImpl(state);
+}
+
+void AccessCase::generateImpl(AccessGenerationState& state)
+{
+    SuperSamplerScope superSamplerScope(false);
+    if (verbose)
+        dataLog("Generating code for: ", *this, "\n");
+    
+    ASSERT(m_state == Generated); // We rely on the callers setting this for us.
+    
+    CCallHelpers& jit = *state.jit;
+    VM& vm = *jit.vm();
+    CodeBlock* codeBlock = jit.codeBlock();
+    StructureStubInfo& stubInfo = *state.stubInfo;
+    const Identifier& ident = *state.ident;
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+    GPRReg scratchGPR = state.scratchGPR;
+
+    ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
+
+    for (const ObjectPropertyCondition& condition : m_conditionSet) {
+        Structure* structure = condition.object()->structure();
+
+        if (condition.isWatchableAssumingImpurePropertyWatchpoint()) {
+            structure->addTransitionWatchpoint(state.addWatchpoint(condition));
+            continue;
+        }
+
+        if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)) {
+            // The reason why this cannot happen is that we require that PolymorphicAccess calls
+            // AccessCase::generate() only after it has verified that
+            // AccessCase::couldStillSucceed() returned true.
+            
+            dataLog("This condition is no longer met: ", condition, "\n");
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+
+        // We will emit code that has a weak reference that isn't otherwise listed anywhere.
+        state.weakReferences.append(WriteBarrier<JSCell>(vm, codeBlock, structure));
+        
+        jit.move(CCallHelpers::TrustedImmPtr(condition.object()), scratchGPR);
+        state.failAndRepatch.append(
+            jit.branchStructure(
+                CCallHelpers::NotEqual,
+                CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()),
+                structure));
+    }
+
+    switch (m_type) {
+    case InHit:
+    case InMiss:
+        jit.boxBooleanPayload(m_type == InHit, valueRegs.payloadGPR());
+        state.succeed();
+        return;
+
+    case Miss:
+        jit.moveTrustedValue(jsUndefined(), valueRegs);
+        state.succeed();
+        return;
+
+    case Load:
+    case GetGetter:
+    case Getter:
+    case Setter:
+    case CustomValueGetter:
+    case CustomAccessorGetter:
+    case CustomValueSetter:
+    case CustomAccessorSetter: {
+        GPRReg valueRegsPayloadGPR = valueRegs.payloadGPR();
+        
+        if (isValidOffset(m_offset)) {
+            Structure* currStructure;
+            if (m_conditionSet.isEmpty())
+                currStructure = structure();
+            else
+                currStructure = m_conditionSet.slotBaseCondition().object()->structure();
+            currStructure->startWatchingPropertyForReplacements(vm, offset());
+        }
+
+        GPRReg baseForGetGPR;
+        if (viaProxy()) {
+            ASSERT(m_type != CustomValueSetter && m_type != CustomAccessorSetter); // Because setters need to not trash valueRegsPayloadGPR.
+            if (m_type == Getter || m_type == Setter)
+                baseForGetGPR = scratchGPR;
+            else
+                baseForGetGPR = valueRegsPayloadGPR;
+
+            ASSERT((m_type != Getter && m_type != Setter) || baseForGetGPR != baseGPR);
+            ASSERT(m_type != Setter || baseForGetGPR != valueRegsPayloadGPR);
+
+            jit.loadPtr(
+                CCallHelpers::Address(baseGPR, JSProxy::targetOffset()),
+                baseForGetGPR);
+        } else
+            baseForGetGPR = baseGPR;
+
+        GPRReg baseForAccessGPR;
+        if (!m_conditionSet.isEmpty()) {
+            jit.move(
+                CCallHelpers::TrustedImmPtr(alternateBase()),
+                scratchGPR);
+            baseForAccessGPR = scratchGPR;
+        } else
+            baseForAccessGPR = baseForGetGPR;
+
+        GPRReg loadedValueGPR = InvalidGPRReg;
+        if (m_type != CustomValueGetter && m_type != CustomAccessorGetter && m_type != CustomValueSetter && m_type != CustomAccessorSetter) {
+            if (m_type == Load || m_type == GetGetter)
+                loadedValueGPR = valueRegsPayloadGPR;
+            else
+                loadedValueGPR = scratchGPR;
+
+            ASSERT((m_type != Getter && m_type != Setter) || loadedValueGPR != baseGPR);
+            ASSERT(m_type != Setter || loadedValueGPR != valueRegsPayloadGPR);
+
+            GPRReg storageGPR;
+            if (isInlineOffset(m_offset))
+                storageGPR = baseForAccessGPR;
+            else {
+                jit.loadPtr(
+                    CCallHelpers::Address(baseForAccessGPR, JSObject::butterflyOffset()),
+                    loadedValueGPR);
+                storageGPR = loadedValueGPR;
+            }
+
+#if USE(JSVALUE64)
+            jit.load64(
+                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset)), loadedValueGPR);
+#else
+            if (m_type == Load || m_type == GetGetter) {
+                jit.load32(
+                    CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + TagOffset),
+                    valueRegs.tagGPR());
+            }
+            jit.load32(
+                CCallHelpers::Address(storageGPR, offsetRelativeToBase(m_offset) + PayloadOffset),
+                loadedValueGPR);
+#endif
+        }
+
+        if (m_type == Load || m_type == GetGetter) {
+            state.succeed();
+            return;
+        }
+
+        if (Options::useDOMJIT() && m_type == CustomAccessorGetter && m_rareData->domJIT) {
+            // We do not need to emit a CheckDOM operation, since the structure check ensures
+            // that the structure of the given base value is structure(). So all we need to
+            // do is perform the CheckDOM work here, at IC compile time.
+            if (structure()->classInfo()->isSubClassOf(m_rareData->domJIT->thisClassInfo())) {
+                emitDOMJITGetter(state, baseForGetGPR);
+                return;
+            }
+        }
+
+        // Stuff for custom getters/setters.
+        CCallHelpers::Call operationCall;
+
+        // Stuff for JS getters/setters.
+        CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
+        CCallHelpers::Call fastPathCall;
+        CCallHelpers::Call slowPathCall;
+
+        // This also does the necessary calculations of whether or not we're an
+        // exception handling call site.
+        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall();
+
+        auto restoreLiveRegistersFromStackForCall = [&](AccessGenerationState::SpillState& spillState, bool callHasReturnValue) {
+            RegisterSet dontRestore;
+            if (callHasReturnValue) {
+                // This is the result value. We don't want to overwrite the result with what we stored to the stack.
+                // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
+                dontRestore.set(valueRegs);
+            }
+            state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);
+        };
+
+        jit.store32(
+            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+
+        if (m_type == Getter || m_type == Setter) {
+            ASSERT(baseGPR != loadedValueGPR);
+            ASSERT(m_type != Setter || (baseGPR != valueRegsPayloadGPR && loadedValueGPR != valueRegsPayloadGPR));
+
+            // Create a JS call using a JS call inline cache. Assume that:
+            //
+            // - SP is aligned and represents the extent of the calling compiler's stack usage.
+            //
+            // - FP is set correctly (i.e. it points to the caller's call frame header).
+            //
+            // - SP - FP is an aligned difference.
+            //
+            // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
+            //   code.
+            //
+            // Therefore, we temporarily grow the stack for the purpose of the call and then
+            // shrink it after.
+
+            state.setSpillStateForJSGetterSetter(spillState);
+
+            RELEASE_ASSERT(!m_rareData->callLinkInfo);
+            m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
+            
+            // FIXME: If we generated a polymorphic call stub that jumped back to the getter
+            // stub, which then jumped back to the main code, then we'd have a reachability
+            // situation that the GC doesn't know about. The GC would ensure that the polymorphic
+            // call stub stayed alive, and it would ensure that the main code stayed alive, but
+            // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
+            // be GC objects, and then we'd be able to say that the polymorphic call stub has a
+            // reference to the getter stub.
+            // https://bugs.webkit.org/show_bug.cgi?id=148914
+            m_rareData->callLinkInfo->disallowStubs();
+            
+            m_rareData->callLinkInfo->setUpCall(
+                CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
+
+            CCallHelpers::JumpList done;
+
+            // There is a "this" argument.
+            unsigned numberOfParameters = 1;
+            // ... and a value argument if we're calling a setter.
+            if (m_type == Setter)
+                numberOfParameters++;
+
+            // Get the accessor; if there ain't one then the result is jsUndefined().
+            if (m_type == Setter) {
+                jit.loadPtr(
+                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
+                    loadedValueGPR);
+            } else {
+                jit.loadPtr(
+                    CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
+                    loadedValueGPR);
+            }
+
+            CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
+                CCallHelpers::Zero, loadedValueGPR);
+
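+            // Compute the callee frame size: header slots plus arguments, minus the CallerFrameAndPC
+            // slots (which the call and the callee's prologue establish), rounded up to stack alignment.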
+            unsigned numberOfRegsForCall = CallFrame::headerSizeInRegisters + numberOfParameters;
+
+            unsigned numberOfBytesForCall =
+                numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
+
+            unsigned alignedNumberOfBytesForCall =
+                WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
+
+            jit.subPtr(
+                CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
+                CCallHelpers::stackPointerRegister);
+
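+            // calleeFrame addresses the new frame's slots relative to the adjusted stack pointer,
+            // biased by the CallerFrameAndPC that has not been pushed yet.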
+            CCallHelpers::Address calleeFrame = CCallHelpers::Address(
+                CCallHelpers::stackPointerRegister,
+                -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
+
+            jit.store32(
+                CCallHelpers::TrustedImm32(numberOfParameters),
+                calleeFrame.withOffset(CallFrameSlot::argumentCount * sizeof(Register) + PayloadOffset));
+
+            jit.storeCell(
+                loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));
+
+            jit.storeCell(
+                baseGPR,
+                calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
+
+            if (m_type == Setter) {
+                jit.storeValue(
+                    valueRegs,
+                    calleeFrame.withOffset(
+                        virtualRegisterForArgument(1).offset() * sizeof(Register)));
+            }
+
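+            // JS call inline cache: compare the loaded getter/setter against a patchable callee
+            // pointer. On mismatch, take the slow path, which dispatches through the CallLinkInfo.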
+            CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
+                CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
+                CCallHelpers::TrustedImmPtr(0));
+
+            fastPathCall = jit.nearCall();
+            if (m_type == Getter)
+                jit.setupResults(valueRegs);
+            done.append(jit.jump());
+
+            slowCase.link(&jit);
+            jit.move(loadedValueGPR, GPRInfo::regT0);
+#if USE(JSVALUE32_64)
+            // We *always* know that the getter/setter, if non-null, is a cell.
+            jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+#endif
+            jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
+            slowPathCall = jit.nearCall();
+            if (m_type == Getter)
+                jit.setupResults(valueRegs);
+            done.append(jit.jump());
+
+            returnUndefined.link(&jit);
+            if (m_type == Getter)
+                jit.moveTrustedValue(jsUndefined(), valueRegs);
+
+            done.link(&jit);
+
+            jit.addPtr(CCallHelpers::TrustedImm32((codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation),
+                GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+            bool callHasReturnValue = isGetter();
+            restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
+
+            jit.addLinkTask(
+                [=, &vm] (LinkBuffer& linkBuffer) {
+                    m_rareData->callLinkInfo->setCallLocations(
+                        CodeLocationLabel(linkBuffer.locationOfNearCall(slowPathCall)),
+                        CodeLocationLabel(linkBuffer.locationOf(addressOfLinkFunctionCheck)),
+                        linkBuffer.locationOfNearCall(fastPathCall));
+
+                    linkBuffer.link(
+                        slowPathCall,
+                        CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
+                });
+        } else {
+            ASSERT(m_type == CustomValueGetter || m_type == CustomAccessorGetter || m_type == CustomValueSetter || m_type == CustomAccessorSetter);
+
+            // Need to make room for the C call so any of our stack spillage isn't overwritten. It's
+            // hard to track if someone did spillage or not, so we just assume that we always need
+            // to make some space here.
+            jit.makeSpaceOnStackForCCall();
+
+            // getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
+            // setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
+            // Custom values are passed the slotBase (the property holder); custom accessors are passed the thisValue (receiver).
+            // FIXME: Remove these differences between custom values and custom accessors.
+            // https://bugs.webkit.org/show_bug.cgi?id=158014
+            GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
+#if USE(JSVALUE64)
+            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
+                jit.setupArgumentsWithExecState(
+                    baseForCustomValue,
+                    CCallHelpers::TrustedImmPtr(ident.impl()));
+            } else
+                jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
+#else
+            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
+                jit.setupArgumentsWithExecState(
+                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
+                    CCallHelpers::TrustedImm32(JSValue::CellTag),
+                    CCallHelpers::TrustedImmPtr(ident.impl()));
+            } else {
+                jit.setupArgumentsWithExecState(
+                    EABI_32BIT_DUMMY_ARG baseForCustomValue,
+                    CCallHelpers::TrustedImm32(JSValue::CellTag),
+                    valueRegs.payloadGPR(), valueRegs.tagGPR());
+            }
+#endif
+            jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
+
+            operationCall = jit.call();
+            jit.addLinkTask(
+                [=] (LinkBuffer& linkBuffer) {
+                    linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
+                });
+
+            if (m_type == CustomValueGetter || m_type == CustomAccessorGetter)
+                jit.setupResults(valueRegs);
+            jit.reclaimSpaceOnStackForCCall();
+
+            CCallHelpers::Jump noException =
+                jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+            state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+            state.emitExplicitExceptionHandler();
+        
+            noException.link(&jit);
+            bool callHasReturnValue = isGetter();
+            restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
+        }
+        state.succeed();
+        return;
+    }
+
+    case Replace: {
+        if (InferredType* type = structure()->inferredTypeFor(ident.impl())) {
+            if (verbose)
+                dataLog("Have type: ", type->descriptor(), "\n");
+            state.failAndRepatch.append(
+                jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
+        } else if (verbose)
+            dataLog("Don't have type.\n");
+        
+        if (isInlineOffset(m_offset)) {
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(
+                    baseGPR,
+                    JSObject::offsetOfInlineStorage() +
+                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
+        } else {
+            jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(
+                    scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
+        }
+        state.succeed();
+        return;
+    }
+
+    case Transition: {
+        // AccessCase::transition() should have returned null if this wasn't true.
+        RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());
+
+        if (InferredType* type = newStructure()->inferredTypeFor(ident.impl())) {
+            if (verbose)
+                dataLog("Have type: ", type->descriptor(), "\n");
+            state.failAndRepatch.append(
+                jit.branchIfNotType(valueRegs, scratchGPR, type->descriptor()));
+        } else if (verbose)
+            dataLog("Don't have type.\n");
+        
+        // NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
+        // exactly when this would make calls.
+        bool allocating = newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity();
+        bool reallocating = allocating && structure()->outOfLineCapacity();
+        bool allocatingInline = allocating && !structure()->couldHaveIndexingHeader();
+
+        ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+        allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+        allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+        allocator.lock(valueRegs);
+        allocator.lock(scratchGPR);
+
+        GPRReg scratchGPR2 = InvalidGPRReg;
+        GPRReg scratchGPR3 = InvalidGPRReg;
+        if (allocatingInline) {
+            scratchGPR2 = allocator.allocateScratchGPR();
+            scratchGPR3 = allocator.allocateScratchGPR();
+        }
+
+        ScratchRegisterAllocator::PreservedState preservedState =
+            allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+        
+        CCallHelpers::JumpList slowPath;
+
+        ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
+
+        if (allocating) {
+            size_t newSize = newStructure()->outOfLineCapacity() * sizeof(JSValue);
+            
+            if (allocatingInline) {
+                MarkedAllocator* allocator = vm.auxiliarySpace.allocatorFor(newSize);
+                
+                if (!allocator) {
+                    // Yuck, this case would suck!
+                    slowPath.append(jit.jump());
+                }
+                
+                jit.move(CCallHelpers::TrustedImmPtr(allocator), scratchGPR2);
+                jit.emitAllocate(scratchGPR, allocator, scratchGPR2, scratchGPR3, slowPath);
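+                // emitAllocate returns the start of the allocation. Out-of-line properties live at
+                // negative offsets from the butterfly pointer, so advance scratchGPR past the new
+                // property storage (and the would-be indexing header) to form the butterfly pointer.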
+                jit.addPtr(CCallHelpers::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR);
+                
+                size_t oldSize = structure()->outOfLineCapacity() * sizeof(JSValue);
+                ASSERT(newSize > oldSize);
+                
+                if (reallocating) {
+                    // Handle the case where we are reallocating (i.e. the old structure/butterfly
+                    // already had out-of-line property storage).
+                    
+                    jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
+                    
+                    // We have scratchGPR = new storage, scratchGPR3 = old storage,
+                    // scratchGPR2 = available
+                    for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
+                        jit.loadPtr(
+                            CCallHelpers::Address(
+                                scratchGPR3,
+                                -static_cast<ptrdiff_t>(
+                                    offset + sizeof(JSValue) + sizeof(void*))),
+                            scratchGPR2);
+                        jit.storePtr(
+                            scratchGPR2,
+                            CCallHelpers::Address(
+                                scratchGPR,
+                                -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
+                    }
+                }
+                
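+                // Zero-fill the newly added out-of-line property slots.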
+                for (size_t offset = oldSize; offset < newSize; offset += sizeof(void*))
+                    jit.storePtr(CCallHelpers::TrustedImmPtr(0), CCallHelpers::Address(scratchGPR, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
+            } else {
+                // Handle the case where we are allocating out-of-line using an operation.
+                RegisterSet extraRegistersToPreserve;
+                extraRegistersToPreserve.set(baseGPR);
+                extraRegistersToPreserve.set(valueRegs);
+                AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
+                
+                jit.store32(
+                    CCallHelpers::TrustedImm32(
+                        state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+                    CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));
+                
+                jit.makeSpaceOnStackForCCall();
+                
+                if (!reallocating) {
+                    jit.setupArgumentsWithExecState(baseGPR);
+                    
+                    CCallHelpers::Call operationCall = jit.call();
+                    jit.addLinkTask(
+                        [=] (LinkBuffer& linkBuffer) {
+                            linkBuffer.link(
+                                operationCall,
+                                FunctionPtr(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity));
+                        });
+                } else {
+                    // Handle the case where we are reallocating (i.e. the old structure/butterfly
+                    // already had out-of-line property storage).
+                    jit.setupArgumentsWithExecState(
+                        baseGPR, CCallHelpers::TrustedImm32(newSize / sizeof(JSValue)));
+                    
+                    CCallHelpers::Call operationCall = jit.call();
+                    jit.addLinkTask(
+                        [=] (LinkBuffer& linkBuffer) {
+                            linkBuffer.link(
+                                operationCall,
+                                FunctionPtr(operationReallocateButterflyToGrowPropertyStorage));
+                        });
+                }
+                
+                jit.reclaimSpaceOnStackForCCall();
+                jit.move(GPRInfo::returnValueGPR, scratchGPR);
+                
+                CCallHelpers::Jump noException =
+                    jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+                
+                state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
+                state.emitExplicitExceptionHandler();
+                
+                noException.link(&jit);
+                state.restoreLiveRegistersFromStackForCall(spillState);
+            }
+        }
+
+        if (isInlineOffset(m_offset)) {
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(
+                    baseGPR,
+                    JSObject::offsetOfInlineStorage() +
+                    offsetInInlineStorage(m_offset) * sizeof(JSValue)));
+        } else {
+            if (!allocating)
+                jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+            jit.storeValue(
+                valueRegs,
+                CCallHelpers::Address(scratchGPR, offsetInButterfly(m_offset) * sizeof(JSValue)));
+        }
+        
+        if (allocatingInline) {
+            // We set the new butterfly and the structure last. Doing it this way ensures that
+            // whatever we had done up to this point is forgotten if we choose to branch to slow
+            // path.
+            jit.nukeStructureAndStoreButterfly(scratchGPR, baseGPR);
+        }
+        
+        uint32_t structureBits = bitwise_cast<uint32_t>(newStructure()->id());
+        jit.store32(
+            CCallHelpers::TrustedImm32(structureBits),
+            CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()));
+
+        allocator.restoreReusedRegistersByPopping(jit, preservedState);
+        state.succeed();
+        
+        // We will have a slow path if we were allocating without the help of an operation.
+        if (allocatingInline) {
+            if (allocator.didReuseRegisters()) {
+                slowPath.link(&jit);
+                allocator.restoreReusedRegistersByPopping(jit, preservedState);
+                state.failAndIgnore.append(jit.jump());
+            } else
+                state.failAndIgnore.append(slowPath);
+        } else
+            RELEASE_ASSERT(slowPath.empty());
+        return;
+    }
+
+    case ArrayLength: {
+        jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+        jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
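+        // The length is stored as a uint32; if it does not fit in an int32 we cannot box it here,
+        // so take the slow path without repatching.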
+        state.failAndIgnore.append(
+            jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
+        jit.boxInt32(scratchGPR, valueRegs);
+        state.succeed();
+        return;
+    }
+
+    case StringLength: {
+        jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
+        jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
+        state.succeed();
+        return;
+    }
+        
+    case IntrinsicGetter: {
+        RELEASE_ASSERT(isValidOffset(offset()));
+
+        // We need to ensure the getter value does not move from under us. Note that GetterSetters
+        // are immutable so we just need to watch the property not any value inside it.
+        Structure* currStructure;
+        if (m_conditionSet.isEmpty())
+            currStructure = structure();
+        else
+            currStructure = m_conditionSet.slotBaseCondition().object()->structure();
+        currStructure->startWatchingPropertyForReplacements(vm, offset());
+
+        emitIntrinsicGetter(state);
+        return;
+    }
+
+    case DirectArgumentsLength:
+    case ScopedArgumentsLength:
+    case MegamorphicLoad:
+        // These need to be handled by generateWithGuard(), since the guard is part of the
+        // algorithm. We can be sure that nobody will call generate() directly for these since they
+        // are not guarded by structure checks.
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void AccessCase::emitDOMJITGetter(AccessGenerationState& state, GPRReg baseForGetGPR)
+{
+    CCallHelpers& jit = *state.jit;
+    StructureStubInfo& stubInfo = *state.stubInfo;
+    JSValueRegs valueRegs = state.valueRegs;
+    GPRReg baseGPR = state.baseGPR;
+    GPRReg scratchGPR = state.scratchGPR;
+
+    // We construct the environment that can execute the DOMJIT::Patchpoint here.
+    auto patchpoint = m_rareData->domJIT->callDOMGetter();
+
+    Vector<GPRReg> gpScratch;
+    Vector<FPRReg> fpScratch;
+    Vector<DOMJIT::Value> regs;
+
+    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+    allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+    allocator.lock(valueRegs);
+    allocator.lock(scratchGPR);
+
+    GPRReg paramBaseGPR = InvalidGPRReg;
+    GPRReg paramGlobalObjectGPR = InvalidGPRReg;
+    JSValueRegs paramValueRegs = valueRegs;
+    GPRReg remainingScratchGPR = InvalidGPRReg;
+
+    // valueRegs and baseForGetGPR may be the same. For example, in the Baseline JIT we pass the same regT0 for baseGPR and valueRegs.
+    // In the FTL, there is no constraint preventing baseForGetGPR from aliasing the result. To keep the implementation of
+    // DOMJIT::Patchpoint simple, it assumes that result registers always early-interfere with input registers, in this case
+    // baseForGetGPR. So we move baseForGetGPR to another register if baseForGetGPR == valueRegs.
+    if (baseForGetGPR != valueRegs.payloadGPR()) {
+        paramBaseGPR = baseForGetGPR;
+        if (!patchpoint->requireGlobalObject)
+            remainingScratchGPR = scratchGPR;
+        else
+            paramGlobalObjectGPR = scratchGPR;
+    } else {
+        jit.move(valueRegs.payloadGPR(), scratchGPR);
+        paramBaseGPR = scratchGPR;
+        if (patchpoint->requireGlobalObject)
+            paramGlobalObjectGPR = allocator.allocateScratchGPR();
+    }
+
+    JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();
+
+    regs.append(paramValueRegs);
+    regs.append(paramBaseGPR);
+    if (patchpoint->requireGlobalObject) {
+        ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
+        regs.append(DOMJIT::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
+    }
+
+    if (patchpoint->numGPScratchRegisters) {
+        unsigned i = 0;
+        if (remainingScratchGPR != InvalidGPRReg) {
+            gpScratch.append(remainingScratchGPR);
+            ++i;
+        }
+        for (; i < patchpoint->numGPScratchRegisters; ++i)
+            gpScratch.append(allocator.allocateScratchGPR());
+    }
+
+    for (unsigned i = 0; i < patchpoint->numFPScratchRegisters; ++i)
+        fpScratch.append(allocator.allocateScratchFPR());
+
+    // Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
+    ScratchRegisterAllocator::PreservedState preservedState =
+        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+
+    if (verbose) {
+        dataLog("baseGPR = ", baseGPR, "\n");
+        dataLog("valueRegs = ", valueRegs, "\n");
+        dataLog("scratchGPR = ", scratchGPR, "\n");
+        dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
+        if (paramGlobalObjectGPR != InvalidGPRReg)
+            dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
+        dataLog("paramValueRegs = ", paramValueRegs, "\n");
+        for (unsigned i = 0; i < patchpoint->numGPScratchRegisters; ++i)
+            dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
+    }
+
+    if (patchpoint->requireGlobalObject)
+        jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);
+
+    // We only spill the registers used by the DOMJIT::Patchpoint here. Registers that are not spilled here explicitly
+    // must be in the used-register set passed by the callers (Baseline, DFG, and FTL) if they need to be preserved.
+    // Some registers can be locked but not be in the used-register set. For example, the caller could make baseGPR the
+    // same as valueRegs and not include it in the used registers, since it will be overwritten anyway.
+    RegisterSet registersToSpillForCCall;
+    for (auto& value : regs) {
+        DOMJIT::Reg reg = value.reg();
+        if (reg.isJSValueRegs())
+            registersToSpillForCCall.set(reg.jsValueRegs());
+        else if (reg.isGPR())
+            registersToSpillForCCall.set(reg.gpr());
+        else
+            registersToSpillForCCall.set(reg.fpr());
+    }
+    for (GPRReg reg : gpScratch)
+        registersToSpillForCCall.set(reg);
+    for (FPRReg reg : fpScratch)
+        registersToSpillForCCall.set(reg);
+    registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
+
+    DOMJITAccessCasePatchpointParams params(WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
+    patchpoint->generator()->run(jit, params);
+    allocator.restoreReusedRegistersByPopping(jit, preservedState);
+    state.succeed();
+
+    CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
+    if (!exceptions.empty()) {
+        exceptions.link(&jit);
+        allocator.restoreReusedRegistersByPopping(jit, preservedState);
+        state.emitExplicitExceptionHandler();
+    }
+}
+
+PolymorphicAccess::PolymorphicAccess() { }
+PolymorphicAccess::~PolymorphicAccess() { }
+
+AccessGenerationResult PolymorphicAccess::addCases(
+    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+    Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
+{
+    SuperSamplerScope superSamplerScope(false);
+    
+    // This method will add the originalCasesToAdd to the list one at a time while preserving the
+    // invariants:
+    // - If a newly added case canReplace() any existing case, then the existing case is removed before
+    //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
+    //   can be removed via the canReplace() rule.
+    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
+    //   cascade through the cases in reverse order, you will get the most recent cases first.
+    // - If this method fails (returns null, doesn't add the cases), then both the previous case list
+    //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
+    //   add more things after failure.
+    
+    // First ensure that the originalCasesToAdd doesn't contain duplicates.
+    Vector<std::unique_ptr<AccessCase>> casesToAdd;
+    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
+        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);
+
+        // Add it only if it is not replaced by the subsequent cases in the list.
+        bool found = false;
+        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
+            if (originalCasesToAdd[j]->canReplace(*myCase)) {
+                found = true;
+                break;
+            }
+        }
+
+        if (found)
+            continue;
+        
+        casesToAdd.append(WTFMove(myCase));
+    }
+
+    if (verbose)
+        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");
+
+    // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
+    // new stub that will be identical to the old one. Returning null should tell the caller to just
+    // keep doing what they were doing before.
+    if (casesToAdd.isEmpty())
+        return AccessGenerationResult::MadeNoChanges;
+
+    // Now add things to the new list. Note that at this point, we will still have old cases that
+    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
+    for (auto& caseToAdd : casesToAdd) {
+        commit(vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
+        m_list.append(WTFMove(caseToAdd));
+    }
+    
+    if (verbose)
+        dataLog("After addCases: m_list: ", listDump(m_list), "\n");
+
+    return AccessGenerationResult::Buffered;
+}
+
+AccessGenerationResult PolymorphicAccess::addCase(
+    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident,
+    std::unique_ptr<AccessCase> newAccess)
+{
+    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
+    newAccesses.append(WTFMove(newAccess));
+    return addCases(vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
+}
+
+bool PolymorphicAccess::visitWeak(VM& vm) const
+{
+    for (unsigned i = 0; i < size(); ++i) {
+        if (!at(i).visitWeak(vm))
+            return false;
+    }
+    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
+        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
+            if (!Heap::isMarked(weakReference.get()))
+                return false;
+        }
+    }
+    return true;
+}
+
+bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
+{
+    bool result = true;
+    for (unsigned i = 0; i < size(); ++i)
+        result &= at(i).propagateTransitions(visitor);
+    return result;
+}
+
+void PolymorphicAccess::dump(PrintStream& out) const
+{
+    out.print(RawPointer(this), ":[");
+    CommaPrinter comma;
+    for (auto& entry : m_list)
+        out.print(comma, *entry);
+    out.print("]");
+}
+
+void PolymorphicAccess::commit(
+    VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
+    StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
+{
+    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
+    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
+    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
+    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
+    // Those common kinds of JSC object accesses don't hit this case.
+    
+    for (WatchpointSet* set : accessCase.commit(vm, ident)) {
+        Watchpoint* watchpoint =
+            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+                watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());
+        
+        set->add(watchpoint);
+    }
+}
+
+AccessGenerationResult PolymorphicAccess::regenerate(
+    VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
+{
+    SuperSamplerScope superSamplerScope(false);
+    
+    if (verbose)
+        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");
+    
+    AccessGenerationState state;
+
+    state.access = this;
+    state.stubInfo = &stubInfo;
+    state.ident = &ident;
+    
+    state.baseGPR = static_cast(stubInfo.patch.baseGPR);
+    state.valueRegs = stubInfo.valueRegs();
+
+    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+    state.allocator = &allocator;
+    allocator.lock(state.baseGPR);
+    allocator.lock(state.valueRegs);
+#if USE(JSVALUE32_64)
+    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
+#endif
+
+    state.scratchGPR = allocator.allocateScratchGPR();
+    
+    CCallHelpers jit(&vm, codeBlock);
+    state.jit = &jit;
+
+    state.preservedReusedRegisterState =
+        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+    // Regenerating is our opportunity to figure out what our list of cases should look like. We
+    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
+    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
+    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
+    // from the code of the current stub (aka previous).
+    ListType cases;
+    unsigned srcIndex = 0;
+    unsigned dstIndex = 0;
+    while (srcIndex < m_list.size()) {
+        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);
+        
+        // If the case had been generated, then we have to keep the original in m_list in case we
+        // fail to regenerate. That case may have data structures that are used by the code that it
+        // had generated. If the case had not been generated, then we want to remove it from m_list.
+        bool isGenerated = someCase->state() == AccessCase::Generated;
+        
+        [&] () {
+            if (!someCase->couldStillSucceed())
+                return;
+
+            // Figure out if this is replaced by any later case.
+            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
+                if (m_list[j]->canReplace(*someCase))
+                    return;
+            }
+            
+            if (isGenerated)
+                cases.append(someCase->clone());
+            else
+                cases.append(WTFMove(someCase));
+        }();
+        
+        if (isGenerated)
+            m_list[dstIndex++] = WTFMove(someCase);
+    }
+    m_list.resize(dstIndex);
+    
+    if (verbose)
+        dataLog("In regenerate: cases: ", listDump(cases), "\n");
+    
+    // Now that we've removed obviously unnecessary cases, we can check if the megamorphic load
+    // optimization is applicable. Note that we basically tune megamorphicLoadCost according to code
+    // size. It would be faster to just allow more repatching with many load cases, and avoid the
+    // megamorphicLoad optimization, if we had infinite executable memory.
+    if (cases.size() >= Options::maxAccessVariantListSize()) {
+        unsigned numSelfLoads = 0;
+        for (auto& newCase : cases) {
+            if (newCase->canBeReplacedByMegamorphicLoad())
+                numSelfLoads++;
+        }
+        
+        if (numSelfLoads >= Options::megamorphicLoadCost()) {
+            if (auto mega = AccessCase::megamorphicLoad(vm, codeBlock)) {
+                cases.removeAllMatching(
+                    [&] (std::unique_ptr<AccessCase>& newCase) -> bool {
+                        return newCase->canBeReplacedByMegamorphicLoad();
+                    });
+                
+                cases.append(WTFMove(mega));
+            }
+        }
+    }
+    
+    if (verbose)
+        dataLog("Optimized cases: ", listDump(cases), "\n");
+    
+    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
+    // won't change that set anymore.
+    
+    bool allGuardedByStructureCheck = true;
+    bool hasJSGetterSetterCall = false;
+    for (auto& newCase : cases) {
+        commit(vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
+        allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
+        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
+            hasJSGetterSetterCall = true;
+    }
+
+    if (cases.isEmpty()) {
+        // This is super unlikely, but we make it legal anyway.
+        state.failAndRepatch.append(jit.jump());
+    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
+        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
+        // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
+        // one case.
+        CCallHelpers::JumpList fallThrough;
+
+        // Cascade through the list, preferring newer entries.
+        for (unsigned i = cases.size(); i--;) {
+            fallThrough.link(&jit);
+            fallThrough.clear();
+            cases[i]->generateWithGuard(state, fallThrough);
+        }
+        state.failAndRepatch.append(fallThrough);
+    } else {
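+        // Every case here is guarded purely by a structure check, so load the structure ID once and
+        // dispatch with a binary switch; each case then generates its body without its own guard.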
+        jit.load32(
+            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
+            state.scratchGPR);
+        
+        Vector<int64_t> caseValues(cases.size());
+        for (unsigned i = 0; i < cases.size(); ++i)
+            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());
+        
+        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
+        while (binarySwitch.advance(jit))
+            cases[binarySwitch.caseIndex()]->generate(state);
+        state.failAndRepatch.append(binarySwitch.fallThrough());
+    }
+
+    if (!state.failAndIgnore.empty()) {
+        state.failAndIgnore.link(&jit);
+        
+        // Make sure that the inline cache optimization code knows that we are taking slow path because
+        // of something that isn't patchable. The slow path will decrement "countdown" and will only
+        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
+        // that the slow path does not try to patch.
+#if CPU(X86) || CPU(X86_64)
+        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
+        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
+#else
+        jit.load8(&stubInfo.countdown, state.scratchGPR);
+        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
+        jit.store8(state.scratchGPR, &stubInfo.countdown);
+#endif
+    }
+
+    CCallHelpers::JumpList failure;
+    if (allocator.didReuseRegisters()) {
+        state.failAndRepatch.link(&jit);
+        state.restoreScratch();
+    } else
+        failure = state.failAndRepatch;
+    failure.append(jit.jump());
+
+    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
+    CallSiteIndex callSiteIndexForExceptionHandling;
+    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
+        // Emit the exception handler.
+        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
+        // Note also that this is not reachable from custom getters/setters. Custom getters/setters will have
+        // their own exception handling logic that doesn't go through genericUnwind.
+        MacroAssembler::Label makeshiftCatchHandler = jit.label();
+
+        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
+        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
+        ASSERT(!spillStateForJSGetterSetter.isEmpty());
+        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
+        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;
+
+        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
+        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
+        state.restoreScratch();
+        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
+
+        HandlerInfo oldHandler = state.originalExceptionHandler();
+        CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
+        jit.addLinkTask(
+            [=] (LinkBuffer& linkBuffer) {
+                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
+
+                HandlerInfo handlerToRegister = oldHandler;
+                handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
+                handlerToRegister.start = newExceptionHandlingCallSite.bits();
+                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
+                codeBlock->appendExceptionHandler(handlerToRegister);
+            });
+
+        // We set these to indicate to the stub to remove itself from the CodeBlock's
+        // exception handler table when it is deallocated.
+        codeBlockThatOwnsExceptionHandlers = codeBlock;
+        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
+        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
+    }
+
+    LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
+    if (linkBuffer.didFailToAllocate()) {
+        if (verbose)
+            dataLog("Did fail to allocate.\n");
+        return AccessGenerationResult::GaveUp;
+    }
+
+    CodeLocationLabel successLabel = stubInfo.doneLocation();
+        
+    linkBuffer.link(state.success, successLabel);
+
+    linkBuffer.link(failure, stubInfo.slowPathStartLocation());
+    
+    if (verbose)
+        dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
+
+    MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
+        codeBlock, linkBuffer,
+        ("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
+
+    bool doesCalls = false;
+    Vector<JSCell*> cellsToMark;
+    for (auto& entry : cases)
+        doesCalls |= entry->doesCalls(&cellsToMark);
+    
+    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
+    m_watchpoints = WTFMove(state.watchpoints);
+    if (!state.weakReferences.isEmpty())
+        m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
+    if (verbose)
+        dataLog("Returning: ", code.code(), "\n");
+    
+    m_list = WTFMove(cases);
+    
+    AccessGenerationResult::Kind resultKind;
+    if (m_list.size() >= Options::maxAccessVariantListSize())
+        resultKind = AccessGenerationResult::GeneratedFinalCode;
+    else
+        resultKind = AccessGenerationResult::GeneratedNewCode;
+    
+    return AccessGenerationResult(resultKind, code.code());
+}
+
+void PolymorphicAccess::aboutToDie()
+{
+    if (m_stubRoutine)
+        m_stubRoutine->aboutToDie();
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
+{
+    switch (kind) {
+    case AccessGenerationResult::MadeNoChanges:
+        out.print("MadeNoChanges");
+        return;
+    case AccessGenerationResult::GaveUp:
+        out.print("GaveUp");
+        return;
+    case AccessGenerationResult::Buffered:
+        out.print("Buffered");
+        return;
+    case AccessGenerationResult::GeneratedNewCode:
+        out.print("GeneratedNewCode");
+        return;
+    case AccessGenerationResult::GeneratedFinalCode:
+        out.print("GeneratedFinalCode");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, AccessCase::AccessType type)
+{
+    switch (type) {
+    case AccessCase::Load:
+        out.print("Load");
+        return;
+    case AccessCase::MegamorphicLoad:
+        out.print("MegamorphicLoad");
+        return;
+    case AccessCase::Transition:
+        out.print("Transition");
+        return;
+    case AccessCase::Replace:
+        out.print("Replace");
+        return;
+    case AccessCase::Miss:
+        out.print("Miss");
+        return;
+    case AccessCase::GetGetter:
+        out.print("GetGetter");
+        return;
+    case AccessCase::Getter:
+        out.print("Getter");
+        return;
+    case AccessCase::Setter:
+        out.print("Setter");
+        return;
+    case AccessCase::CustomValueGetter:
+        out.print("CustomValueGetter");
+        return;
+    case AccessCase::CustomAccessorGetter:
+        out.print("CustomAccessorGetter");
+        return;
+    case AccessCase::CustomValueSetter:
+        out.print("CustomValueSetter");
+        return;
+    case AccessCase::CustomAccessorSetter:
+        out.print("CustomAccessorSetter");
+        return;
+    case AccessCase::IntrinsicGetter:
+        out.print("IntrinsicGetter");
+        return;
+    case AccessCase::InHit:
+        out.print("InHit");
+        return;
+    case AccessCase::InMiss:
+        out.print("InMiss");
+        return;
+    case AccessCase::ArrayLength:
+        out.print("ArrayLength");
+        return;
+    case AccessCase::StringLength:
+        out.print("StringLength");
+        return;
+    case AccessCase::DirectArgumentsLength:
+        out.print("DirectArgumentsLength");
+        return;
+    case AccessCase::ScopedArgumentsLength:
+        out.print("ScopedArgumentsLength");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void printInternal(PrintStream& out, AccessCase::State state)
+{
+    switch (state) {
+    case AccessCase::Primordial:
+        out.print("Primordial");
+        return;
+    case AccessCase::Committed:
+        out.print("Committed");
+        return;
+    case AccessCase::Generated:
+        out.print("Generated");
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+
diff --git a/bytecode/PolymorphicAccess.h b/bytecode/PolymorphicAccess.h
new file mode 100644
index 0000000..6f701c5
--- /dev/null
+++ b/bytecode/PolymorphicAccess.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "JITStubRoutine.h"
+#include "JSFunctionInlines.h"
+#include "MacroAssembler.h"
+#include "ObjectPropertyConditionSet.h"
+#include "ScratchRegisterAllocator.h"
+#include "Structure.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+namespace DOMJIT {
+class GetterSetter;
+}
+
+class CodeBlock;
+class PolymorphicAccess;
+class StructureStubInfo;
+class WatchpointsOnStructureStubInfo;
+class ScratchRegisterAllocator;
+
+struct AccessGenerationState;
+
+// An AccessCase describes one of the cases of a PolymorphicAccess. A PolymorphicAccess represents a
+// planned (to be generated in the future) or already-generated stub for some inline cache. That stub contains fast
+// path code for some finite number of fast cases, each described by an AccessCase object.
+//
+// An AccessCase object has a lifecycle that proceeds through several states. Note that the states
+// of AccessCase have a lot to do with the global effect epoch (we'll say epoch for short). This is
+// a simple way of reasoning about the state of the system outside this AccessCase. Any observable
+// effect - like storing to a property, changing an object's structure, etc. - increments the epoch.
+// The states are:
+//
+// Primordial:   This is an AccessCase that was just allocated. It does not correspond to any actual
+//               code and it is not owned by any PolymorphicAccess. In this state, the AccessCase
+//               assumes that it is in the same epoch as when it was created. This is important
+//               because it may make claims about itself ("I represent a valid case so long as you
+//               register a watchpoint on this set") that could be contradicted by some outside
+//               effects (like firing and deleting the watchpoint set in question). This is also the
+//               state that an AccessCase is in when it is cloned (AccessCase::clone()).
+//
+// Committed:    This happens as soon as some PolymorphicAccess takes ownership of this AccessCase.
+//               In this state, the AccessCase no longer assumes anything about the epoch. To
+//               accomplish this, PolymorphicAccess calls AccessCase::commit(). This must be done
+//               during the same epoch when the AccessCase was created, either by the client or by
+//               clone(). When created by the client, committing during the same epoch works because
+//               we can be sure that whatever watchpoint sets they spoke of are still valid. When
+//               created by clone(), we can be sure that the set is still valid because the original
+//               of the clone still has watchpoints on it.
+//
+// Generated:    This is the state when the PolymorphicAccess generates code for this case by
+//               calling AccessCase::generate() or AccessCase::generateWithGuard(). At this point
+//               the case object will have some extra stuff in it, like possibly the CallLinkInfo
+//               object associated with the inline cache.
+//               FIXME: Moving into the Generated state should not mutate the AccessCase object or
+//               put more stuff into it. If we fix this, then we can get rid of AccessCase::clone().
+//               https://bugs.webkit.org/show_bug.cgi?id=156456
+//
+// An AccessCase may be destroyed while in any of these states.
+//
+// We will sometimes buffer committed AccessCases in the PolymorphicAccess object before generating
+// code. This allows us to only regenerate once we've accumulated (hopefully) more than one new
+// AccessCase.
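+//
+// A rough sketch of the lifecycle, using only the declarations below (a sketch, not a literal
+// excerpt of the repatching code that actually drives these calls):
+//
+//     std::unique_ptr<AccessCase> newCase = AccessCase::get(vm, owner, AccessCase::Load, offset, structure);
+//     // Primordial: freshly allocated, still assumes the epoch it was created in.
+//     access.addCase(vm, codeBlock, stubInfo, ident, WTFMove(newCase));
+//     // Committed: the PolymorphicAccess called commit() and now owns the case.
+//     access.regenerate(vm, codeBlock, stubInfo, ident);
+//     // Generated: code was emitted for this case (possibly after buffering several cases).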
+class AccessCase {
+    WTF_MAKE_NONCOPYABLE(AccessCase);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    enum AccessType : uint8_t {
+        Load,
+        MegamorphicLoad,
+        Transition,
+        Replace,
+        Miss,
+        GetGetter,
+        Getter,
+        Setter,
+        CustomValueGetter,
+        CustomAccessorGetter,
+        CustomValueSetter,
+        CustomAccessorSetter,
+        IntrinsicGetter,
+        InHit,
+        InMiss,
+        ArrayLength,
+        StringLength,
+        DirectArgumentsLength,
+        ScopedArgumentsLength
+    };
+    
+    enum State : uint8_t {
+        Primordial,
+        Committed,
+        Generated
+    };
+
+    static std::unique_ptr<AccessCase> tryGet(
+        VM&, JSCell* owner, AccessType, PropertyOffset, Structure*,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+        bool viaProxy = false,
+        WatchpointSet* additionalSet = nullptr);
+
+    static std::unique_ptr<AccessCase> get(
+        VM&, JSCell* owner, AccessType, PropertyOffset, Structure*,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(),
+        bool viaProxy = false,
+        WatchpointSet* additionalSet = nullptr,
+        PropertySlot::GetValueFunc = nullptr,
+        JSObject* customSlotBase = nullptr,
+        DOMJIT::GetterSetter* = nullptr);
+    
+    static std::unique_ptr<AccessCase> megamorphicLoad(VM&, JSCell* owner);
+    
+    static std::unique_ptr<AccessCase> replace(VM&, JSCell* owner, Structure*, PropertyOffset);
+
+    static std::unique_ptr<AccessCase> transition(
+        VM&, JSCell* owner, Structure* oldStructure, Structure* newStructure, PropertyOffset,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+    static std::unique_ptr<AccessCase> setter(
+        VM&, JSCell* owner, AccessType, Structure*, PropertyOffset,
+        const ObjectPropertyConditionSet&, PutPropertySlot::PutValueFunc = nullptr,
+        JSObject* customSlotBase = nullptr);
+
+    static std::unique_ptr<AccessCase> in(
+        VM&, JSCell* owner, AccessType, Structure*,
+        const ObjectPropertyConditionSet& = ObjectPropertyConditionSet());
+
+    static std::unique_ptr<AccessCase> getLength(VM&, JSCell* owner, AccessType);
+    static std::unique_ptr<AccessCase> getIntrinsic(VM&, JSCell* owner, JSFunction* intrinsic, PropertyOffset, Structure*, const ObjectPropertyConditionSet&);
+    
+    static std::unique_ptr<AccessCase> fromStructureStubInfo(VM&, JSCell* owner, StructureStubInfo&);
+
+    ~AccessCase();
+    
+    AccessType type() const { return m_type; }
+    State state() const { return m_state; }
+    PropertyOffset offset() const { return m_offset; }
+    bool viaProxy() const { return m_rareData ? m_rareData->viaProxy : false; }
+    
+    Structure* structure() const
+    {
+        if (m_type == Transition)
+            return m_structure->previousID();
+        return m_structure.get();
+    }
+    bool guardedByStructureCheck() const;
+
+    Structure* newStructure() const
+    {
+        ASSERT(m_type == Transition);
+        return m_structure.get();
+    }
+    
+    ObjectPropertyConditionSet conditionSet() const { return m_conditionSet; }
+    JSFunction* intrinsicFunction() const
+    {
+        ASSERT(type() == IntrinsicGetter && m_rareData);
+        return m_rareData->intrinsicFunction.get();
+    }
+    Intrinsic intrinsic() const
+    {
+        return intrinsicFunction()->intrinsic();
+    }
+
+    DOMJIT::GetterSetter* domJIT() const
+    {
+        ASSERT(m_rareData);
+        return m_rareData->domJIT;
+    }
+
+    WatchpointSet* additionalSet() const
+    {
+        return m_rareData ? m_rareData->additionalSet.get() : nullptr;
+    }
+
+    JSObject* customSlotBase() const
+    {
+        return m_rareData ? m_rareData->customSlotBase.get() : nullptr;
+    }
+
+    JSObject* alternateBase() const;
+
+    // If you supply the optional vector, this will append the set of cells that this will need to keep alive
+    // past the call.
+    bool doesCalls(Vector<JSCell*>* cellsToMark = nullptr) const;
+
+    bool isGetter() const
+    {
+        switch (type()) {
+        case Getter:
+        case CustomValueGetter:
+        case CustomAccessorGetter:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    // This can return null even for a getter/setter, if it hasn't been generated yet. That's
+    // actually somewhat likely because of how we do buffering of new cases.
+    CallLinkInfo* callLinkInfo() const
+    {
+        if (!m_rareData)
+            return nullptr;
+        return m_rareData->callLinkInfo.get();
+    }
+    
+    // Is it still possible for this case to ever be taken?  Must call this as a prerequisite for
+    // calling generate() and friends.  If this returns true, then you can call generate().  If
+    // this returns false, then generate() will crash.  You must call generate() in the same epoch
+    // as when you called couldStillSucceed().
+    bool couldStillSucceed() const;
+    
+    static bool canEmitIntrinsicGetter(JSFunction*, Structure*);
+
+    bool canBeReplacedByMegamorphicLoad() const;
+
+    // If this method returns true, then it's a good idea to remove 'other' from the access once 'this'
+    // is added. This method assumes that in case of contradictions, 'this' represents a newer, and so
+    // more useful, truth. This method can be conservative; it will return false when in doubt.
+    bool canReplace(const AccessCase& other) const;
+
+    void dump(PrintStream& out) const;
+    
+private:
+    friend class CodeBlock;
+    friend class PolymorphicAccess;
+
+    AccessCase();
+
+    bool visitWeak(VM&) const;
+    bool propagateTransitions(SlotVisitor&) const;
+    
+    // FIXME: This only exists because of how AccessCase puts post-generation things into itself.
+    // https://bugs.webkit.org/show_bug.cgi?id=156456
+    std::unique_ptr<AccessCase> clone() const;
+    
+    // Perform any action that must be performed before the end of the epoch in which the case
+    // was created. Returns a set of watchpoint sets that will need to be watched.
+    Vector commit(VM&, const Identifier&);
+
+    // Fall through on success. Two kinds of failures are supported: fall-through, which means that we
+    // should try a different case; and failure, which means that this was the right case but it needs
+    // help from the slow path.
+    void generateWithGuard(AccessGenerationState&, MacroAssembler::JumpList& fallThrough);
+
+    // Fall through on success, add a jump to the failure list on failure.
+    void generate(AccessGenerationState&);
+    
+    void generateImpl(AccessGenerationState&);
+    void emitIntrinsicGetter(AccessGenerationState&);
+    void emitDOMJITGetter(AccessGenerationState&, GPRReg baseForGetGPR);
+    
+    AccessType m_type { Load };
+    State m_state { Primordial };
+    PropertyOffset m_offset { invalidOffset };
+
+    // Usually this is the structure that we expect the base object to have. But for a transition this
+    // is the *new* structure, and we rely on the fact that it has a strong reference to the old
+    // structure. For proxies, this is the structure of the object behind the proxy.
+    WriteBarrier<Structure> m_structure;
+
+    ObjectPropertyConditionSet m_conditionSet;
+
+    class RareData {
+        WTF_MAKE_FAST_ALLOCATED;
+    public:
+        RareData()
+            : viaProxy(false)
+            , domJIT(nullptr)
+        {
+            customAccessor.opaque = nullptr;
+        }
+        
+        bool viaProxy;
+        RefPtr<WatchpointSet> additionalSet;
+        // FIXME: This should probably live in the stub routine object.
+        // https://bugs.webkit.org/show_bug.cgi?id=156456
+        std::unique_ptr<CallLinkInfo> callLinkInfo;
+        union {
+            PropertySlot::GetValueFunc getter;
+            PutPropertySlot::PutValueFunc setter;
+            void* opaque;
+        } customAccessor;
+        WriteBarrier<JSObject> customSlotBase;
+        WriteBarrier<JSFunction> intrinsicFunction;
+        DOMJIT::GetterSetter* domJIT;
+    };
+
+    std::unique_ptr<RareData> m_rareData;
+};
+
+class AccessGenerationResult {
+public:
+    enum Kind {
+        MadeNoChanges,
+        GaveUp,
+        Buffered,
+        GeneratedNewCode,
+        GeneratedFinalCode // Generated so much code that we never want to generate code again.
+    };
+    
+    AccessGenerationResult()
+    {
+    }
+    
+    AccessGenerationResult(Kind kind)
+        : m_kind(kind)
+    {
+        RELEASE_ASSERT(kind != GeneratedNewCode);
+        RELEASE_ASSERT(kind != GeneratedFinalCode);
+    }
+    
+    AccessGenerationResult(Kind kind, MacroAssemblerCodePtr code)
+        : m_kind(kind)
+        , m_code(code)
+    {
+        RELEASE_ASSERT(kind == GeneratedNewCode || kind == GeneratedFinalCode);
+        RELEASE_ASSERT(code);
+    }
+    
+    bool operator==(const AccessGenerationResult& other) const
+    {
+        return m_kind == other.m_kind && m_code == other.m_code;
+    }
+    
+    bool operator!=(const AccessGenerationResult& other) const
+    {
+        return !(*this == other);
+    }
+    
+    explicit operator bool() const
+    {
+        return *this != AccessGenerationResult();
+    }
+    
+    Kind kind() const { return m_kind; }
+    
+    const MacroAssemblerCodePtr& code() const { return m_code; }
+    
+    bool madeNoChanges() const { return m_kind == MadeNoChanges; }
+    bool gaveUp() const { return m_kind == GaveUp; }
+    bool buffered() const { return m_kind == Buffered; }
+    bool generatedNewCode() const { return m_kind == GeneratedNewCode; }
+    bool generatedFinalCode() const { return m_kind == GeneratedFinalCode; }
+    
+    // If we gave up on this attempt to generate code, or if we generated the "final" code, then we
+    // should give up after this.
+    bool shouldGiveUpNow() const { return gaveUp() || generatedFinalCode(); }
+    
+    bool generatedSomeCode() const { return generatedNewCode() || generatedFinalCode(); }
+    
+    void dump(PrintStream&) const;
+    
+private:
+    Kind m_kind;
+    MacroAssemblerCodePtr m_code;
+};
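+
+// A sketch of how a caller might consume a result, using only the predicates above (hypothetical
+// caller code, not an excerpt of the actual repatching logic):
+//
+//     AccessGenerationResult result = access.regenerate(vm, codeBlock, stubInfo, ident);
+//     if (result.shouldGiveUpNow())
+//         ...; // stop trying to patch this inline cache
+//     else if (result.generatedSomeCode())
+//         ...; // link the inline cache's patchable jump to result.code()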
+
+class PolymorphicAccess {
+    WTF_MAKE_NONCOPYABLE(PolymorphicAccess);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    PolymorphicAccess();
+    ~PolymorphicAccess();
+
+    // When this fails (returns GaveUp), this will leave the old stub intact but you should not try
+    // to call this method again for that PolymorphicAccess instance.
+    AccessGenerationResult addCases(
+        VM&, CodeBlock*, StructureStubInfo&, const Identifier&, Vector<std::unique_ptr<AccessCase>, 2>);
+
+    AccessGenerationResult addCase(
+        VM&, CodeBlock*, StructureStubInfo&, const Identifier&, std::unique_ptr<AccessCase>);
+    
+    AccessGenerationResult regenerate(VM&, CodeBlock*, StructureStubInfo&, const Identifier&);
+    
+    bool isEmpty() const { return m_list.isEmpty(); }
+    unsigned size() const { return m_list.size(); }
+    const AccessCase& at(unsigned i) const { return *m_list[i]; }
+    const AccessCase& operator[](unsigned i) const { return *m_list[i]; }
+
+    // If this returns false then we are requesting a reset of the owning StructureStubInfo.
+    bool visitWeak(VM&) const;
+    
+    // This returns true if it has marked everything that it will ever mark. This can be used as an
+    // optimization to avoid calling this method again during the fixpoint.
+    bool propagateTransitions(SlotVisitor&) const;
+
+    void aboutToDie();
+
+    void dump(PrintStream& out) const;
+    bool containsPC(void* pc) const
+    { 
+        if (!m_stubRoutine)
+            return false;
+
+        uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+        return m_stubRoutine->startAddress() <= pcAsInt && pcAsInt <= m_stubRoutine->endAddress();
+    }
+
+private:
+    friend class AccessCase;
+    friend class CodeBlock;
+    friend struct AccessGenerationState;
+    
+    typedef Vector<std::unique_ptr<AccessCase>, 2> ListType;
+    
+    void commit(
+        VM&, std::unique_ptr<AccessCase>&, CodeBlock*, StructureStubInfo&,
+        const Identifier&, AccessCase&);
+
+    MacroAssemblerCodePtr regenerate(
+        VM&, CodeBlock*, StructureStubInfo&, const Identifier&, ListType& cases);
+
+    ListType m_list;
+    RefPtr<JITStubRoutine> m_stubRoutine;
+    std::unique_ptr<WatchpointsOnStructureStubInfo> m_watchpoints;
+    std::unique_ptr<Vector<WriteBarrier<JSCell>>> m_weakReferences;
+};
+
+struct AccessGenerationState {
+    AccessGenerationState()
+        : m_calculatedRegistersForCallAndExceptionHandling(false)
+        , m_needsToRestoreRegistersIfException(false)
+        , m_calculatedCallSiteIndex(false)
+    {
+    }
+    CCallHelpers* jit { nullptr };
+    ScratchRegisterAllocator* allocator;
+    ScratchRegisterAllocator::PreservedState preservedReusedRegisterState;
+    PolymorphicAccess* access { nullptr };
+    StructureStubInfo* stubInfo { nullptr };
+    MacroAssembler::JumpList success;
+    MacroAssembler::JumpList failAndRepatch;
+    MacroAssembler::JumpList failAndIgnore;
+    GPRReg baseGPR { InvalidGPRReg };
+    JSValueRegs valueRegs;
+    GPRReg scratchGPR { InvalidGPRReg };
+    const Identifier* ident;
+    std::unique_ptr<WatchpointsOnStructureStubInfo> watchpoints;
+    Vector<WriteBarrier<JSCell>> weakReferences;
+
+    Watchpoint* addWatchpoint(const ObjectPropertyCondition& = ObjectPropertyCondition());
+
+    void restoreScratch();
+    void succeed();
+
+    struct SpillState {
+        SpillState() = default;
+        SpillState(RegisterSet&& regs, unsigned usedStackBytes)
+            : spilledRegisters(WTFMove(regs))
+            , numberOfStackBytesUsedForRegisterPreservation(usedStackBytes)
+        {
+        }
+
+        RegisterSet spilledRegisters { };
+        unsigned numberOfStackBytesUsedForRegisterPreservation { std::numeric_limits<unsigned>::max() };
+
+        bool isEmpty() const { return numberOfStackBytesUsedForRegisterPreservation == std::numeric_limits<unsigned>::max(); }
+    };
+
+    const RegisterSet& calculateLiveRegistersForCallAndExceptionHandling();
+
+    SpillState preserveLiveRegistersToStackForCall(const RegisterSet& extra = RegisterSet());
+
+    void restoreLiveRegistersFromStackForCallWithThrownException(const SpillState&);
+    void restoreLiveRegistersFromStackForCall(const SpillState&, const RegisterSet& dontRestore = RegisterSet());
+
+    const RegisterSet& liveRegistersForCall();
+
+    CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal();
+    CallSiteIndex callSiteIndexForExceptionHandling()
+    {
+        RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+        RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+        RELEASE_ASSERT(m_calculatedCallSiteIndex);
+        return m_callSiteIndex;
+    }
+
+    const HandlerInfo& originalExceptionHandler();
+
+    bool needsToRestoreRegistersIfException() const { return m_needsToRestoreRegistersIfException; }
+    CallSiteIndex originalCallSiteIndex() const;
+    
+    void emitExplicitExceptionHandler();
+
+    void setSpillStateForJSGetterSetter(SpillState& spillState)
+    {
+        if (!m_spillStateForJSGetterSetter.isEmpty()) {
+            ASSERT(m_spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation == spillState.numberOfStackBytesUsedForRegisterPreservation);
+            ASSERT(m_spillStateForJSGetterSetter.spilledRegisters == spillState.spilledRegisters);
+        }
+        m_spillStateForJSGetterSetter = spillState;
+    }
+    SpillState spillStateForJSGetterSetter() const { return m_spillStateForJSGetterSetter; }
+    
+private:
+    const RegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite();
+    
+    RegisterSet m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+    RegisterSet m_liveRegistersForCall;
+    CallSiteIndex m_callSiteIndex { CallSiteIndex(std::numeric_limits<unsigned>::max()) };
+    SpillState m_spillStateForJSGetterSetter;
+    bool m_calculatedRegistersForCallAndExceptionHandling : 1;
+    bool m_needsToRestoreRegistersIfException : 1;
+    bool m_calculatedCallSiteIndex : 1;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::AccessGenerationResult::Kind);
+void printInternal(PrintStream&, JSC::AccessCase::AccessType);
+void printInternal(PrintStream&, JSC::AccessCase::State);
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
diff --git a/bytecode/PreciseJumpTargets.cpp b/bytecode/PreciseJumpTargets.cpp
new file mode 100644
index 0000000..9c06e7e
--- /dev/null
+++ b/bytecode/PreciseJumpTargets.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PreciseJumpTargets.h"
+
+#include "InterpreterInlines.h"
+#include "JSCInlines.h"
+#include "PreciseJumpTargetsInlines.h"
+
+namespace JSC {
+
+template 
+static void getJumpTargetsForBytecodeOffset(Block* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector& out)
+{
+    OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+    extractStoredJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) {
+        out.append(bytecodeOffset + relativeOffset);
+    });
+    // op_loop_hint does not have a jump target stored in the instruction stream; the op_loop_hint
+    // itself is the target of the loop's back edge, so record its own offset.
+    if (opcodeID == op_loop_hint)
+        out.append(bytecodeOffset);
+}
+
+enum class ComputePreciseJumpTargetsMode {
+    FollowCodeBlockClaim,
+    ForceCompute,
+};
+
+template
+void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector& out)
+{
+    ASSERT(out.isEmpty());
+    
+    // We will derive a superset of the jump targets that the code block thinks it has.
+    // So, if the code block claims there are none, then we are done.
+    if (Mode == ComputePreciseJumpTargetsMode::FollowCodeBlockClaim && !codeBlock->numberOfJumpTargets())
+        return;
+    
+    for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) {
+        out.append(codeBlock->exceptionHandler(i).target);
+        out.append(codeBlock->exceptionHandler(i).start);
+        out.append(codeBlock->exceptionHandler(i).end);
+    }
+
+    Interpreter* interpreter = codeBlock->vm()->interpreter;
+    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {
+        OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+        getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out);
+        bytecodeOffset += opcodeLengths[opcodeID];
+    }
+    
+    std::sort(out.begin(), out.end());
+    
+    // We will have duplicates, and we must remove them.
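+    // For example, after sorting, [3, 3, 7, 7, 7, 12] is compacted in place to [3, 7, 12].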
+    unsigned toIndex = 0;
+    unsigned fromIndex = 0;
+    unsigned lastValue = UINT_MAX;
+    while (fromIndex < out.size()) {
+        unsigned value = out[fromIndex++];
+        if (value == lastValue)
+            continue;
+        out[toIndex++] = value;
+        lastValue = value;
+    }
+    out.resize(toIndex);
+    out.shrinkToFit();
+}
+
+void computePreciseJumpTargets(CodeBlock* codeBlock, Vector& out)
+{
+    computePreciseJumpTargetsInternal(codeBlock, codeBlock->instructions().begin(), codeBlock->instructions().size(), out);
+}
+
+void computePreciseJumpTargets(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector& out)
+{
+    computePreciseJumpTargetsInternal(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector& out)
+{
+    computePreciseJumpTargetsInternal(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector& out)
+{
+    computePreciseJumpTargetsInternal(codeBlock, instructionsBegin, instructionCount, out);
+}
+
+void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector& out)
+{
+    getJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, bytecodeOffset, out);
+}
+
+void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector& out)
+{
+    getJumpTargetsForBytecodeOffset(codeBlock, codeBlock->vm()->interpreter, instructionsBegin, bytecodeOffset, out);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/PreciseJumpTargets.h b/bytecode/PreciseJumpTargets.h
new file mode 100644
index 0000000..bcc9346
--- /dev/null
+++ b/bytecode/PreciseJumpTargets.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedCodeBlock;
+struct UnlinkedInstruction;
+
+// Return a sorted list of bytecode indices that are the destinations of jumps.
+void computePreciseJumpTargets(CodeBlock*, Vector& out);
+void computePreciseJumpTargets(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector& out);
+void computePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector& out);
+
+void recomputePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector& out);
+
+void findJumpTargetsForBytecodeOffset(CodeBlock*, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector& out);
+void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector& out);
+
+} // namespace JSC
diff --git a/bytecode/PreciseJumpTargetsInlines.h b/bytecode/PreciseJumpTargetsInlines.h
new file mode 100644
index 0000000..19fdcdc
--- /dev/null
+++ b/bytecode/PreciseJumpTargetsInlines.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "InterpreterInlines.h"
+#include "Opcode.h"
+#include "PreciseJumpTargets.h"
+
+namespace JSC {
+
+template
+inline void extractStoredJumpTargetsForBytecodeOffset(Block* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Function function)
+{
+    OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset]);
+    Instruction* current = instructionsBegin + bytecodeOffset;
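+    // The operand index of the jump target follows from each opcode's layout: op_jmp stores it as
+    // its only operand, the one-input conditional jumps store it after their condition operand,
+    // the two-input jumps store it after both inputs, and the switch opcodes store a jump table
+    // index followed by a default target.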
+    switch (opcodeID) {
+    case op_jmp:
+        function(current[1].u.operand);
+        break;
+    case op_jtrue:
+    case op_jfalse:
+    case op_jeq_null:
+    case op_jneq_null:
+        function(current[2].u.operand);
+        break;
+    case op_jneq_ptr:
+    case op_jless:
+    case op_jlesseq:
+    case op_jgreater:
+    case op_jgreatereq:
+    case op_jnless:
+    case op_jnlesseq:
+    case op_jngreater:
+    case op_jngreatereq:
+        function(current[3].u.operand);
+        break;
+    case op_switch_imm:
+    case op_switch_char: {
+        auto& table = codeBlock->switchJumpTable(current[1].u.operand);
+        for (unsigned i = table.branchOffsets.size(); i--;)
+            function(table.branchOffsets[i]);
+        function(current[2].u.operand);
+        break;
+    }
+    case op_switch_string: {
+        auto& table = codeBlock->stringSwitchJumpTable(current[1].u.operand);
+        auto iter = table.offsetTable.begin();
+        auto end = table.offsetTable.end();
+        for (; iter != end; ++iter)
+            function(iter->value.branchOffset);
+        function(current[2].u.operand);
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+} // namespace JSC
diff --git a/bytecode/ProgramCodeBlock.cpp b/bytecode/ProgramCodeBlock.cpp
new file mode 100644
index 0000000..b4fac57
--- /dev/null
+++ b/bytecode/ProgramCodeBlock.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ProgramCodeBlock.h"
+
+namespace JSC {
+
+const ClassInfo ProgramCodeBlock::s_info = {
+    "ProgramCodeBlock", &Base::s_info, 0,
+    CREATE_METHOD_TABLE(ProgramCodeBlock)
+};
+
+void ProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<ProgramCodeBlock*>(cell)->~ProgramCodeBlock();
+}
+
+} // namespace JSC
diff --git a/bytecode/ProgramCodeBlock.h b/bytecode/ProgramCodeBlock.h
new file mode 100644
index 0000000..94c7a7a
--- /dev/null
+++ b/bytecode/ProgramCodeBlock.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "GlobalCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
+
+namespace JSC {
+
+class ProgramCodeBlock : public GlobalCodeBlock {
+public:
+    typedef GlobalCodeBlock Base;
+    DECLARE_INFO;
+
+    static ProgramCodeBlock* create(VM* vm, CopyParsedBlockTag, ProgramCodeBlock& other)
+    {
+        ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+            ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), CopyParsedBlock, other);
+        instance->finishCreation(*vm, CopyParsedBlock, other);
+        return instance;
+    }
+
+    static ProgramCodeBlock* create(VM* vm, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+    {
+        ProgramCodeBlock* instance = new (NotNull, allocateCell<ProgramCodeBlock>(vm->heap))
+            ProgramCodeBlock(vm, vm->programCodeBlockStructure.get(), ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, firstLineColumnOffset);
+        instance->finishCreation(*vm, ownerExecutable, unlinkedCodeBlock, scope);
+        return instance;
+    }
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+private:
+    ProgramCodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, ProgramCodeBlock& other)
+        : GlobalCodeBlock(vm, structure, CopyParsedBlock, other)
+    {
+    }
+
+    ProgramCodeBlock(VM* vm, Structure* structure, ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock,
+        JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
+        : GlobalCodeBlock(vm, structure, ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
+    {
+    }
+
+    static void destroy(JSCell*);
+};
+
+} // namespace JSC
diff --git a/bytecode/PropertyCondition.cpp b/bytecode/PropertyCondition.cpp
new file mode 100644
index 0000000..cb19283
--- /dev/null
+++ b/bytecode/PropertyCondition.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PropertyCondition.h"
+
+#include "GetterSetter.h"
+#include "JSCInlines.h"
+#include "TrackedReferences.h"
+
+namespace JSC {
+
+static bool verbose = false;
+
+void PropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    if (!*this) {
+        out.print("<invalid>");
+        return;
+    }
+    
+    out.print(m_kind, " of ", m_uid);
+    switch (m_kind) {
+    case Presence:
+        out.print(" at ", offset(), " with attributes ", attributes());
+        return;
+    case Absence:
+    case AbsenceOfSetter:
+        out.print(" with prototype ", inContext(JSValue(prototype()), context));
+        return;
+    case Equivalence:
+        out.print(" with ", inContext(requiredValue(), context));
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void PropertyCondition::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint(
+    Structure* structure, JSObject* base) const
+{
+    if (verbose) {
+        dataLog(
+            "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ",
+            JSValue(base), " assuming impure property watchpoints are set.\n");
+    }
+    
+    if (!*this) {
+        if (verbose)
+            dataLog("Invalid because unset.\n");
+        return false;
+    }
+    
+    if (!structure->propertyAccessesAreCacheable()) {
+        if (verbose)
+            dataLog("Invalid because accesses are not cacheable.\n");
+        return false;
+    }
+    
+    switch (m_kind) {
+    case Presence: {
+        unsigned currentAttributes;
+        PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+        if (currentOffset != offset() || currentAttributes != attributes()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(),
+                    " but they are ", currentOffset, ", ", currentAttributes, "\n");
+            }
+            return false;
+        }
+        return true;
+    }
+        
+    case Absence: {
+        if (structure->isDictionary()) {
+            if (verbose)
+                dataLog("Invalid because it's a dictionary.\n");
+            return false;
+        }
+
+        PropertyOffset currentOffset = structure->getConcurrently(uid());
+        if (currentOffset != invalidOffset) {
+            if (verbose)
+                dataLog("Invalid because the property exists at offset: ", currentOffset, "\n");
+            return false;
+        }
+        
+        if (structure->storedPrototypeObject() != prototype()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+                    "it should have been ", JSValue(prototype()), "\n");
+            }
+            return false;
+        }
+        
+        return true;
+    }
+    
+    case AbsenceOfSetter: {
+        if (structure->isDictionary()) {
+            if (verbose)
+                dataLog("Invalid because it's a dictionary.\n");
+            return false;
+        }
+        
+        unsigned currentAttributes;
+        PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes);
+        if (currentOffset != invalidOffset) {
+            if (currentAttributes & (Accessor | CustomAccessor)) {
+                if (verbose) {
+                    dataLog(
+                        "Invalid because we expected not to have a setter, but we have one at offset ",
+                        currentOffset, " with attributes ", currentAttributes, "\n");
+                }
+                return false;
+            }
+        }
+        
+        if (structure->storedPrototypeObject() != prototype()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the prototype is ", structure->storedPrototype(), " even though "
+                    "it should have been ", JSValue(prototype()), "\n");
+            }
+            return false;
+        }
+        
+        return true;
+    }
+        
+    case Equivalence: {
+        if (!base || base->structure() != structure) {
+            // Conservatively return false, since we cannot verify this one without having the
+            // object.
+            if (verbose) {
+                dataLog(
+                    "Invalid because we don't have a base or the base has the wrong structure: ",
+                    RawPointer(base), "\n");
+            }
+            return false;
+        }
+        
+        // FIXME: This is somewhat racy, and maybe more risky than we want.
+        // https://bugs.webkit.org/show_bug.cgi?id=134641
+        
+        PropertyOffset currentOffset = structure->getConcurrently(uid());
+        if (currentOffset == invalidOffset) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the base no longer appears to have ", uid(), " on its structure: ",
+                        RawPointer(base), "\n");
+            }
+            return false;
+        }
+
+        JSValue currentValue = base->getDirect(currentOffset);
+        if (currentValue != requiredValue()) {
+            if (verbose) {
+                dataLog(
+                    "Invalid because the value is ", currentValue, " but we require ", requiredValue(),
+                    "\n");
+            }
+            return false;
+        }
+        
+        return true;
+    } }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+    return false;
+}
+
+bool PropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const
+{
+    if (!*this)
+        return false;
+    
+    switch (m_kind) {
+    case Presence:
+    case Absence:
+    case Equivalence:
+        return structure->needImpurePropertyWatchpoint();
+    default:
+        return false;
+    }
+}
+
+bool PropertyCondition::isStillValid(Structure* structure, JSObject* base) const
+{
+    if (!isStillValidAssumingImpurePropertyWatchpoint(structure, base))
+        return false;
+
+    // Currently we assume that an impure property can cause a property to appear, and can also
+    // "shadow" an existing JS property on the same object. Hence it affects both presence and
+    // absence. It doesn't affect AbsenceOfSetter because impure properties aren't ever setters.
+    switch (m_kind) {
+    case Absence:
+        if (structure->typeInfo().getOwnPropertySlotIsImpure() || structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+            return false;
+        break;
+    case Presence:
+    case Equivalence:
+        if (structure->typeInfo().getOwnPropertySlotIsImpure())
+            return false;
+        break;
+    default:
+        break;
+    }
+    
+    return true;
+}
+
+bool PropertyCondition::isWatchableWhenValid(
+    Structure* structure, WatchabilityEffort effort) const
+{
+    if (structure->transitionWatchpointSetHasBeenInvalidated())
+        return false;
+    
+    switch (m_kind) {
+    case Equivalence: {
+        PropertyOffset offset = structure->getConcurrently(uid());
+        
+        // This method should only be called when some variant of isValid returned true, which
+        // implies that we already confirmed that the structure knows of the property. We should
+        // also have verified that the Structure is a cacheable dictionary, which means we
+        // shouldn't have a TOCTOU race either.
+        RELEASE_ASSERT(offset != invalidOffset);
+        
+        WatchpointSet* set = nullptr;
+        switch (effort) {
+        case MakeNoChanges:
+            set = structure->propertyReplacementWatchpointSet(offset);
+            break;
+        case EnsureWatchability:
+            set = structure->ensurePropertyReplacementWatchpointSet(
+                *Heap::heap(structure)->vm(), offset);
+            break;
+        }
+        
+        if (!set || !set->isStillValid())
+            return false;
+        
+        break;
+    }
+        
+    default:
+        break;
+    }
+    
+    return true;
+}
+
+bool PropertyCondition::isWatchableAssumingImpurePropertyWatchpoint(
+    Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+    return isStillValidAssumingImpurePropertyWatchpoint(structure, base)
+        && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isWatchable(
+    Structure* structure, JSObject* base, WatchabilityEffort effort) const
+{
+    return isStillValid(structure, base)
+        && isWatchableWhenValid(structure, effort);
+}
+
+bool PropertyCondition::isStillLive() const
+{
+    if (hasPrototype() && prototype() && !Heap::isMarked(prototype()))
+        return false;
+    
+    if (hasRequiredValue()
+        && requiredValue()
+        && requiredValue().isCell()
+        && !Heap::isMarked(requiredValue().asCell()))
+        return false;
+    
+    return true;
+}
+
+void PropertyCondition::validateReferences(const TrackedReferences& tracked) const
+{
+    if (hasPrototype())
+        tracked.check(prototype());
+    
+    if (hasRequiredValue())
+        tracked.check(requiredValue());
+}
+
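+// A value "claims to be an accessor" when it is a GetterSetter cell; the attributes claim the same
+// thing via the Accessor bit. A cached Presence condition only makes sense when the two agree, which
+// is what the helpers below check.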
+bool PropertyCondition::isValidValueForAttributes(JSValue value, unsigned attributes)
+{
+    bool attributesClaimAccessor = !!(attributes & Accessor);
+    bool valueClaimsAccessor = !!jsDynamicCast<GetterSetter*>(value);
+    return attributesClaimAccessor == valueClaimsAccessor;
+}
+
+bool PropertyCondition::isValidValueForPresence(JSValue value) const
+{
+    return isValidValueForAttributes(value, attributes());
+}
+
+PropertyCondition PropertyCondition::attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const
+{
+    Structure* structure = base->structure();
+    if (!structure->isValidOffset(offset()))
+        return PropertyCondition();
+    JSValue value = base->getDirect(offset());
+    if (!isValidValueForPresence(value))
+        return PropertyCondition();
+    return equivalenceWithoutBarrier(uid(), value);
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::PropertyCondition::Kind condition)
+{
+    switch (condition) {
+    case JSC::PropertyCondition::Presence:
+        out.print("Presence");
+        return;
+    case JSC::PropertyCondition::Absence:
+        out.print("Absence");
+        return;
+    case JSC::PropertyCondition::AbsenceOfSetter:
+        out.print("AbsenceOfSetter");
+        return;
+    case JSC::PropertyCondition::Equivalence:
+        out.print("Equivalence");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
diff --git a/bytecode/PropertyCondition.h b/bytecode/PropertyCondition.h
new file mode 100644
index 0000000..ea85b18
--- /dev/null
+++ b/bytecode/PropertyCondition.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSObject.h"
+#include 
+
+namespace JSC {
+
+class TrackedReferences;
+
+class PropertyCondition {
+public:
+    enum Kind {
+        Presence,
+        Absence,
+        AbsenceOfSetter,
+        Equivalence // An adaptive watchpoint on this will be a pair of watchpoints, and when the structure transitions, we will set the replacement watchpoint on the new structure.
+    };
+    
+    PropertyCondition()
+        : m_uid(nullptr)
+        , m_kind(Presence)
+    {
+        memset(&u, 0, sizeof(u));
+    }
+    
+    PropertyCondition(WTF::HashTableDeletedValueType)
+        : m_uid(nullptr)
+        , m_kind(Absence)
+    {
+        memset(&u, 0, sizeof(u));
+    }
+    
+    static PropertyCondition presenceWithoutBarrier(UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = Presence;
+        result.u.presence.offset = offset;
+        result.u.presence.attributes = attributes;
+        return result;
+    }
+    
+    static PropertyCondition presence(
+        VM&, JSCell*, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes)
+    {
+        return presenceWithoutBarrier(uid, offset, attributes);
+    }
+
+    // NOTE: The prototype is the storedPrototype not the prototypeForLookup.
+    static PropertyCondition absenceWithoutBarrier(UniquedStringImpl* uid, JSObject* prototype)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = Absence;
+        result.u.absence.prototype = prototype;
+        return result;
+    }
+    
+    static PropertyCondition absence(
+        VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceWithoutBarrier(uid, prototype);
+    }
+    
+    static PropertyCondition absenceOfSetterWithoutBarrier(
+        UniquedStringImpl* uid, JSObject* prototype)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = AbsenceOfSetter;
+        result.u.absence.prototype = prototype;
+        return result;
+    }
+    
+    static PropertyCondition absenceOfSetter(
+        VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype)
+    {
+        if (owner)
+            vm.heap.writeBarrier(owner);
+        return absenceOfSetterWithoutBarrier(uid, prototype);
+    }
+    
+    static PropertyCondition equivalenceWithoutBarrier(
+        UniquedStringImpl* uid, JSValue value)
+    {
+        PropertyCondition result;
+        result.m_uid = uid;
+        result.m_kind = Equivalence;
+        result.u.equivalence.value = JSValue::encode(value);
+        return result;
+    }
+        
+    static PropertyCondition equivalence(
+        VM& vm, JSCell* owner, UniquedStringImpl* uid, JSValue value)
+    {
+        if (value.isCell() && owner)
+            vm.heap.writeBarrier(owner);
+        return equivalenceWithoutBarrier(uid, value);
+    }
+    
+    explicit operator bool() const { return m_uid || m_kind != Presence; }
+    
+    Kind kind() const { return m_kind; }
+    UniquedStringImpl* uid() const { return m_uid; }
+    
+    bool hasOffset() const { return !!*this && m_kind == Presence; }
+    PropertyOffset offset() const
+    {
+        ASSERT(hasOffset());
+        return u.presence.offset;
+    }
+    bool hasAttributes() const { return !!*this && m_kind == Presence; }
+    unsigned attributes() const
+    {
+        ASSERT(hasAttributes());
+        return u.presence.attributes;
+    }
+    
+    bool hasPrototype() const { return !!*this && (m_kind == Absence || m_kind == AbsenceOfSetter); }
+    JSObject* prototype() const
+    {
+        ASSERT(hasPrototype());
+        return u.absence.prototype;
+    }
+    
+    bool hasRequiredValue() const { return !!*this && m_kind == Equivalence; }
+    JSValue requiredValue() const
+    {
+        ASSERT(hasRequiredValue());
+        return JSValue::decode(u.equivalence.value);
+    }
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+    
+    unsigned hash() const
+    {
+        unsigned result = WTF::PtrHash<UniquedStringImpl*>::hash(m_uid) + static_cast<unsigned>(m_kind);
+        switch (m_kind) {
+        case Presence:
+            result ^= u.presence.offset;
+            result ^= u.presence.attributes;
+            break;
+        case Absence:
+        case AbsenceOfSetter:
+            result ^= WTF::PtrHash<JSObject*>::hash(u.absence.prototype);
+            break;
+        case Equivalence:
+            result ^= EncodedJSValueHash::hash(u.equivalence.value);
+            break;
+        }
+        return result;
+    }
+    
+    bool operator==(const PropertyCondition& other) const
+    {
+        if (m_uid != other.m_uid)
+            return false;
+        if (m_kind != other.m_kind)
+            return false;
+        switch (m_kind) {
+        case Presence:
+            return u.presence.offset == other.u.presence.offset
+                && u.presence.attributes == other.u.presence.attributes;
+        case Absence:
+        case AbsenceOfSetter:
+            return u.absence.prototype == other.u.absence.prototype;
+        case Equivalence:
+            return u.equivalence.value == other.u.equivalence.value;
+        }
+        RELEASE_ASSERT_NOT_REACHED();
+        return false;
+    }
+    
+    bool isHashTableDeletedValue() const
+    {
+        return !m_uid && m_kind == Absence;
+    }
+    
+    // Two conditions are compatible if they are identical or if they speak of different uids. If
+    // false is returned, you have to decide how to resolve the conflict - for example if there is
+    // a Presence and an Equivalence then in some cases you'll want the more general of the two
+    // while in other cases you'll want the more specific of the two. This will also return false
+    // for contradictions, like Presence and Absence on the same uid. By convention, invalid
+    // conditions aren't compatible with anything.
+    bool isCompatibleWith(const PropertyCondition& other) const
+    {
+        if (!*this || !other)
+            return false;
+        return *this == other || uid() != other.uid();
+    }
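+    // Informal sketch of the intent (fooUid/barUid below are illustrative, not part of the API):
+    //     PropertyCondition a = PropertyCondition::presenceWithoutBarrier(fooUid, 0, 0);
+    //     PropertyCondition b = PropertyCondition::absenceWithoutBarrier(barUid, nullptr);
+    //     a.isCompatibleWith(b);                                                        // true: different uids.
+    //     a.isCompatibleWith(PropertyCondition::absenceWithoutBarrier(fooUid, nullptr)); // false: contradiction.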
+    
+    // Checks if the object's structure claims that the property won't be intercepted.
+    bool isStillValidAssumingImpurePropertyWatchpoint(Structure*, JSObject* base = nullptr) const;
+    
+    // Returns true if we need an impure property watchpoint to ensure validity even if
+    // isStillValidAccordingToStructure() returned true.
+    bool validityRequiresImpurePropertyWatchpoint(Structure*) const;
+    
+    // Checks if the condition is still valid right now for the given object and structure.
+    // May conservatively return false, if the object and structure alone don't guarantee the
+    // condition. This happens for an Absence condition on an object that may have impure
+    // properties. If the object is not supplied, then a "true" return indicates that checking if
+    // an object has the given structure guarantees the condition still holds. If an object is
+    // supplied, then you may need to use some other watchpoints on the object to guarantee the
+    // condition in addition to the structure check.
+    bool isStillValid(Structure*, JSObject* base = nullptr) const;
+    
+    // In some cases, the condition is not watchable, but could be made watchable by enabling the
+    // appropriate watchpoint. For example, replacement watchpoints are enabled only when some
+    // access is cached on the property in some structure. This is mainly to save space for
+    // dictionary properties or properties that never get very hot. But, it's always safe to
+    // enable watching, provided that this is called from the main thread.
+    enum WatchabilityEffort {
+        // This is the default. It means that we don't change the state of any Structure or
+        // object, and implies that if the property happens not to be watchable then we don't make
+        // it watchable. This is mandatory if calling from a JIT thread. This is also somewhat
+        // preferable when first deciding whether to watch a condition for the first time (i.e.
+        // not from a watchpoint fire that causes us to see if we should adapt), since a
+        // watchpoint not being initialized for watching implies that maybe we don't know enough
+        // yet to make it profitable to watch -- as in, the thing being watched may not have
+        // stabilized yet. We prefer to only assume that a condition will hold if it has been
+        // known to hold for a while already.
+        MakeNoChanges,
+        
+        // Do what it takes to ensure that the property can be watched, if doing so has no
+        // user-observable effect. For now this just means that we will ensure that a property
+        // replacement watchpoint is enabled if it hadn't been enabled already. Do not use this
+        // from JIT threads, since the act of enabling watchpoints is not thread-safe.
+        EnsureWatchability
+    };
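+    // A rough usage sketch (illustrative): a concurrent compiler thread may only query,
+    // while main-thread code may opt into enabling the replacement watchpoint:
+    //     condition.isWatchable(structure, base);                     // JIT thread: MakeNoChanges
+    //     condition.isWatchable(structure, base, EnsureWatchability); // main thread only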
+    
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure and possibly an impure property watchpoint.
+    bool isWatchableAssumingImpurePropertyWatchpoint(
+        Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+    
+    // This means that it's still valid and we could enforce validity by setting a transition
+    // watchpoint on the structure.
+    bool isWatchable(
+        Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const;
+    
+    bool watchingRequiresStructureTransitionWatchpoint() const
+    {
+        // Currently, this is required for all of our conditions.
+        return !!*this;
+    }
+    bool watchingRequiresReplacementWatchpoint() const
+    {
+        return !!*this && m_kind == Equivalence;
+    }
+    
+    // This means that the objects involved in this are still live.
+    bool isStillLive() const;
+    
+    void validateReferences(const TrackedReferences&) const;
+
+    static bool isValidValueForAttributes(JSValue value, unsigned attributes);
+
+    bool isValidValueForPresence(JSValue) const;
+
+    PropertyCondition attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const;
+
+private:
+    bool isWatchableWhenValid(Structure*, WatchabilityEffort) const;
+
+    UniquedStringImpl* m_uid;
+    Kind m_kind;
+    union {
+        struct {
+            PropertyOffset offset;
+            unsigned attributes;
+        } presence;
+        struct {
+            JSObject* prototype;
+        } absence;
+        struct {
+            EncodedJSValue value;
+        } equivalence;
+    } u;
+};
+
+struct PropertyConditionHash {
+    static unsigned hash(const PropertyCondition& key) { return key.hash(); }
+    static bool equal(
+        const PropertyCondition& a, const PropertyCondition& b)
+    {
+        return a == b;
+    }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::PropertyCondition::Kind);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::PropertyCondition> {
+    typedef JSC::PropertyConditionHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::PropertyCondition> : SimpleClassHashTraits<JSC::PropertyCondition> { };
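+
+// With the specializations above, PropertyCondition works directly as a hash key, e.g.
+// (illustrative) HashSet<JSC::PropertyCondition> or HashMap<JSC::PropertyCondition, unsigned>.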
+
+} // namespace WTF
diff --git a/bytecode/PutByIdFlags.cpp b/bytecode/PutByIdFlags.cpp
new file mode 100644
index 0000000..f280900
--- /dev/null
+++ b/bytecode/PutByIdFlags.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PutByIdFlags.h"
+
+#include "InferredType.h"
+#include 
+#include 
+#include 
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, PutByIdFlags flags)
+{
+    CommaPrinter comma("|");
+    if (flags & PutByIdIsDirect)
+        out.print(comma, "IsDirect");
+
+    InferredType::Kind kind = InferredType::kindForFlags(flags);
+    out.print(comma, kind);
+    if (InferredType::hasStructure(kind))
+        out.print(":", bitwise_cast(decodeStructureID(flags)));
+}
+
+} // namespace WTF
+
diff --git a/bytecode/PutByIdFlags.h b/bytecode/PutByIdFlags.h
new file mode 100644
index 0000000..dc2c5e2
--- /dev/null
+++ b/bytecode/PutByIdFlags.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "StructureIDTable.h"
+
+namespace JSC {
+
+enum PutByIdFlags : intptr_t {
+    PutByIdNone = 0,
+
+    // This flag indicates that the put_by_id is direct. That means that we store the property without
+    // checking if the prototype chain has a setter.
+    PutByIdIsDirect = 0x1,
+    PutByIdPersistentFlagsMask = 0x1,
+
+    // NOTE: The values below must be in sync with what is in LowLevelInterpreter.asm.
+
+    // Determining the required inferred type involves first checking the primary type mask, and then
+    // using that to figure out the meaning of the secondary mask:
+    // switch (flags & PutByIdPrimaryTypeMask) {
+    // case PutByIdPrimaryTypeSecondary:
+    //     switch (flags & PutByIdSecondaryTypeMask) {
+    //     ...
+    //     }
+    //     break;
+    // case PutByIdPrimaryTypeObjectWithStructure:
+    // case PutByIdPrimaryTypeObjectWithStructureOrOther:
+    //     StructureID structureID = decodeStructureID(flags);
+    //     break;
+    // }
+    PutByIdPrimaryTypeMask = 0x6,
+    PutByIdPrimaryTypeSecondary = 0x0, // Need to check the secondary type mask for the type.
+    PutByIdPrimaryTypeObjectWithStructure = 0x2, // Secondary type has structure ID.
+    PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4, // Secondary type has structure ID.
+
+    PutByIdSecondaryTypeMask = -0x8,
+    PutByIdSecondaryTypeBottom = 0x0,
+    PutByIdSecondaryTypeBoolean = 0x8,
+    PutByIdSecondaryTypeOther = 0x10,
+    PutByIdSecondaryTypeInt32 = 0x18,
+    PutByIdSecondaryTypeNumber = 0x20,
+    PutByIdSecondaryTypeString = 0x28,
+    PutByIdSecondaryTypeSymbol = 0x30,
+    PutByIdSecondaryTypeObject = 0x38,
+    PutByIdSecondaryTypeObjectOrOther = 0x40,
+    PutByIdSecondaryTypeTop = 0x48
+};
+
+inline PutByIdFlags encodeStructureID(StructureID id)
+{
+#if USE(JSVALUE64)
+    return static_cast<PutByIdFlags>(static_cast<intptr_t>(id) << 3);
+#else
+    PutByIdFlags result = bitwise_cast<PutByIdFlags>(id);
+    ASSERT(!(result & ~PutByIdSecondaryTypeMask));
+    return result;
+#endif
+}
+
+inline StructureID decodeStructureID(PutByIdFlags flags)
+{
+#if USE(JSVALUE64)
+    return static_cast<StructureID>(flags >> 3);
+#else
+    return bitwise_cast<StructureID>(flags & PutByIdSecondaryTypeMask);
+#endif
+}
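+
+// Round-trip sketch (illustrative): for any StructureID id accepted by encodeStructureID,
+// decodeStructureID(encodeStructureID(id)) == id, and the encoded bits never overlap the
+// persistent flags, i.e. !(encodeStructureID(id) & PutByIdPersistentFlagsMask).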
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::PutByIdFlags);
+
+} // namespace WTF
diff --git a/bytecode/PutByIdStatus.cpp b/bytecode/PutByIdStatus.cpp
new file mode 100644
index 0000000..fc0a472
--- /dev/null
+++ b/bytecode/PutByIdStatus.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PutByIdStatus.h"
+
+#include "CodeBlock.h"
+#include "ComplexGetStatus.h"
+#include "LLIntData.h"
+#include "LowLevelInterpreter.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
+#include "Structure.h"
+#include "StructureChain.h"
+#include "StructureStubInfo.h"
+#include 
+
+namespace JSC {
+
+bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
+{
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].attemptToMerge(variant))
+            return true;
+    }
+    for (unsigned i = 0; i < m_variants.size(); ++i) {
+        if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
+            return false;
+    }
+    m_variants.append(variant);
+    return true;
+}
+
+#if ENABLE(DFG_JIT)
+bool PutByIdStatus::hasExitSite(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
+    
+}
+#endif
+
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
+{
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(uid);
+
+    VM& vm = *profiledBlock->vm();
+    
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+    StructureID structureID = instruction[4].u.structureID;
+    if (!structureID)
+        return PutByIdStatus(NoInformation);
+    
+    Structure* structure = vm.heap.structureIDTable().get(structureID);
+
+    StructureID newStructureID = instruction[6].u.structureID;
+    if (!newStructureID) {
+        PropertyOffset offset = structure->getConcurrently(uid);
+        if (!isValidOffset(offset))
+            return PutByIdStatus(NoInformation);
+        
+        return PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+    }
+
+    Structure* newStructure = vm.heap.structureIDTable().get(newStructureID);
+    
+    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());
+    
+    PropertyOffset offset = newStructure->getConcurrently(uid);
+    if (!isValidOffset(offset))
+        return PutByIdStatus(NoInformation);
+    
+    ObjectPropertyConditionSet conditionSet;
+    if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) {
+        conditionSet =
+            generateConditionsForPropertySetterMissConcurrently(
+                *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid);
+        if (!conditionSet.isValid())
+            return PutByIdStatus(NoInformation);
+    }
+    
+    return PutByIdVariant::transition(
+        structure, newStructure, conditionSet, offset, newStructure->inferredTypeDescriptorFor(uid));
+}
+
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
+{
+    ConcurrentJSLocker locker(profiledBlock->m_lock);
+    
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(uid);
+#if ENABLE(DFG_JIT)
+    if (hasExitSite(locker, profiledBlock, bytecodeIndex))
+        return PutByIdStatus(TakesSlowPath);
+    
+    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
+    PutByIdStatus result = computeForStubInfo(
+        locker, profiledBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
+    if (!result)
+        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
+    
+    return result;
+#else // ENABLE(DFG_JIT)
+    UNUSED_PARAM(map);
+    return PutByIdStatus(NoInformation);
+#endif // ENABLE(DFG_JIT)
+}
+
+#if ENABLE(JIT)
+PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJSLocker& locker, CodeBlock* baselineBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+    return computeForStubInfo(
+        locker, baselineBlock, stubInfo, uid,
+        CallLinkStatus::computeExitSiteData(locker, baselineBlock, codeOrigin.bytecodeIndex));
+}
+
+PutByIdStatus PutByIdStatus::computeForStubInfo(
+    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
+    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
+{
+    if (!stubInfo || !stubInfo->everConsidered)
+        return PutByIdStatus();
+    
+    if (stubInfo->tookSlowPath)
+        return PutByIdStatus(TakesSlowPath);
+    
+    switch (stubInfo->cacheType) {
+    case CacheType::Unset:
+        // This means that we attempted to cache but failed for some reason.
+        return PutByIdStatus(TakesSlowPath);
+        
+    case CacheType::PutByIdReplace: {
+        PropertyOffset offset =
+            stubInfo->u.byIdSelf.baseObjectStructure->getConcurrently(uid);
+        if (isValidOffset(offset)) {
+            return PutByIdVariant::replace(
+                stubInfo->u.byIdSelf.baseObjectStructure.get(), offset, InferredType::Top);
+        }
+        return PutByIdStatus(TakesSlowPath);
+    }
+        
+    case CacheType::Stub: {
+        PolymorphicAccess* list = stubInfo->u.stub;
+        
+        PutByIdStatus result;
+        result.m_state = Simple;
+        
+        State slowPathState = TakesSlowPath;
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.doesCalls())
+                slowPathState = MakesCalls;
+        }
+        
+        for (unsigned i = 0; i < list->size(); ++i) {
+            const AccessCase& access = list->at(i);
+            if (access.viaProxy())
+                return PutByIdStatus(slowPathState);
+            
+            PutByIdVariant variant;
+            
+            switch (access.type()) {
+            case AccessCase::Replace: {
+                Structure* structure = access.structure();
+                PropertyOffset offset = structure->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::replace(
+                    structure, offset, structure->inferredTypeDescriptorFor(uid));
+                break;
+            }
+                
+            case AccessCase::Transition: {
+                PropertyOffset offset =
+                    access.newStructure()->getConcurrently(uid);
+                if (!isValidOffset(offset))
+                    return PutByIdStatus(slowPathState);
+                ObjectPropertyConditionSet conditionSet = access.conditionSet();
+                if (!conditionSet.structuresEnsureValidity())
+                    return PutByIdStatus(slowPathState);
+                variant = PutByIdVariant::transition(
+                    access.structure(), access.newStructure(), conditionSet, offset,
+                    access.newStructure()->inferredTypeDescriptorFor(uid));
+                break;
+            }
+                
+            case AccessCase::Setter: {
+                Structure* structure = access.structure();
+                
+                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
+                    structure, access.conditionSet(), uid);
+                
+                switch (complexGetStatus.kind()) {
+                case ComplexGetStatus::ShouldSkip:
+                    continue;
+                    
+                case ComplexGetStatus::TakesSlowPath:
+                    return PutByIdStatus(slowPathState);
+                    
+                case ComplexGetStatus::Inlineable: {
+                    std::unique_ptr<CallLinkStatus> callLinkStatus =
+                        std::make_unique<CallLinkStatus>();
+                    if (CallLinkInfo* callLinkInfo = access.callLinkInfo()) {
+                        *callLinkStatus = CallLinkStatus::computeFor(
+                            locker, profiledBlock, *callLinkInfo, callExitSiteData);
+                    }
+                    
+                    variant = PutByIdVariant::setter(
+                        structure, complexGetStatus.offset(), complexGetStatus.conditionSet(),
+                        WTFMove(callLinkStatus));
+                } // case Inlineable
+                } // switch (complexGetStatus.kind())
+                break;
+            }
+                
+            case AccessCase::CustomValueSetter:
+            case AccessCase::CustomAccessorSetter:
+                return PutByIdStatus(MakesCalls);
+
+            default:
+                return PutByIdStatus(slowPathState);
+            }
+            
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(slowPathState);
+        }
+        
+        return result;
+    }
+        
+    default:
+        return PutByIdStatus(TakesSlowPath);
+    }
+}
+#endif
+
+PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
+{
+#if ENABLE(DFG_JIT)
+    if (dfgBlock) {
+        CallLinkStatus::ExitSiteData exitSiteData;
+        {
+            ConcurrentJSLocker locker(baselineBlock->m_lock);
+            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
+                return PutByIdStatus(TakesSlowPath);
+            exitSiteData = CallLinkStatus::computeExitSiteData(
+                locker, baselineBlock, codeOrigin.bytecodeIndex);
+        }
+            
+        PutByIdStatus result;
+        {
+            ConcurrentJSLocker locker(dfgBlock->m_lock);
+            result = computeForStubInfo(
+                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
+        }
+        
+        // We use TakesSlowPath in some cases where the stub was unset. That's weird and
+        // it would be better not to do that. But it means that we have to defend
+        // ourselves here.
+        if (result.isSimple())
+            return result;
+    }
+#else
+    UNUSED_PARAM(dfgBlock);
+    UNUSED_PARAM(dfgMap);
+#endif
+
+    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
+}
+
+PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
+{
+    if (parseIndex(*uid))
+        return PutByIdStatus(TakesSlowPath);
+
+    if (set.isEmpty())
+        return PutByIdStatus();
+    
+    PutByIdStatus result;
+    result.m_state = Simple;
+    for (unsigned i = 0; i < set.size(); ++i) {
+        Structure* structure = set[i];
+        
+        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
+            return PutByIdStatus(TakesSlowPath);
+
+        if (!structure->propertyAccessesAreCacheable())
+            return PutByIdStatus(TakesSlowPath);
+    
+        unsigned attributes;
+        PropertyOffset offset = structure->getConcurrently(uid, attributes);
+        if (isValidOffset(offset)) {
+            if (attributes & CustomAccessor)
+                return PutByIdStatus(MakesCalls);
+
+            if (attributes & (Accessor | ReadOnly))
+                return PutByIdStatus(TakesSlowPath);
+            
+            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
+            if (!replaceSet || replaceSet->isStillValid()) {
+                // When this executes, it'll create, and fire, this replacement watchpoint set.
+                // That means that this has probably never executed or that something fishy is
+                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
+                // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
+                // So, better leave this alone and take slow path.
+                return PutByIdStatus(TakesSlowPath);
+            }
+
+            PutByIdVariant variant =
+                PutByIdVariant::replace(structure, offset, structure->inferredTypeDescriptorFor(uid));
+            if (!result.appendVariant(variant))
+                return PutByIdStatus(TakesSlowPath);
+            continue;
+        }
+    
+        // Our hypothesis is that we're doing a transition. Before we prove that this is really
+        // true, we want to do some sanity checks.
+    
+        // Don't cache put transitions on dictionaries.
+        if (structure->isDictionary())
+            return PutByIdStatus(TakesSlowPath);
+
+        // If the structure corresponds to something that isn't an object, then give up, since
+        // we don't want to be adding properties to strings.
+        if (!structure->typeInfo().isObject())
+            return PutByIdStatus(TakesSlowPath);
+    
+        ObjectPropertyConditionSet conditionSet;
+        if (!isDirect) {
+            conditionSet = generateConditionsForPropertySetterMissConcurrently(
+                globalObject->vm(), globalObject, structure, uid);
+            if (!conditionSet.isValid())
+                return PutByIdStatus(TakesSlowPath);
+        }
+    
+        // We only optimize if there is already a structure that the transition is cached to.
+        Structure* transition =
+            Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
+        if (!transition)
+            return PutByIdStatus(TakesSlowPath);
+        ASSERT(isValidOffset(offset));
+    
+        bool didAppend = result.appendVariant(
+            PutByIdVariant::transition(
+                structure, transition, conditionSet, offset,
+                transition->inferredTypeDescriptorFor(uid)));
+        if (!didAppend)
+            return PutByIdStatus(TakesSlowPath);
+    }
+    
+    return result;
+}
+
+bool PutByIdStatus::makesCalls() const
+{
+    if (m_state == MakesCalls)
+        return true;
+    
+    if (m_state != Simple)
+        return false;
+    
+    for (unsigned i = m_variants.size(); i--;) {
+        if (m_variants[i].makesCalls())
+            return true;
+    }
+    
+    return false;
+}
+
+void PutByIdStatus::dump(PrintStream& out) const
+{
+    switch (m_state) {
+    case NoInformation:
+        out.print("(NoInformation)");
+        return;
+        
+    case Simple:
+        out.print("(", listDump(m_variants), ")");
+        return;
+        
+    case TakesSlowPath:
+        out.print("(TakesSlowPath)");
+        return;
+    case MakesCalls:
+        out.print("(MakesCalls)");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/bytecode/PutByIdStatus.h b/bytecode/PutByIdStatus.h
new file mode 100644
index 0000000..1dd95cd
--- /dev/null
+++ b/bytecode/PutByIdStatus.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CallLinkStatus.h"
+#include "ExitingJITType.h"
+#include "PutByIdVariant.h"
+#include 
+
+namespace JSC {
+
+class CodeBlock;
+class VM;
+class JSGlobalObject;
+class Structure;
+class StructureChain;
+class StructureStubInfo;
+
+typedef HashMap StubInfoMap;
+
+class PutByIdStatus {
+public:
+    enum State {
+        // It's uncached so we have no information.
+        NoInformation,
+        // It's cached as a simple store of some kind.
+        Simple,
+        // It's known to often take slow path.
+        TakesSlowPath,
+        // It's known to take paths that make calls.
+        MakesCalls
+    };
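+    // Rough client-side sketch (illustrative, not a prescribed pattern):
+    //     if (status.isSimple()) { /* specialize on each of status.variants() */ }
+    //     else if (status.takesSlowPath()) { /* emit a generic put_by_id */ }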
+    
+    PutByIdStatus()
+        : m_state(NoInformation)
+    {
+    }
+    
+    explicit PutByIdStatus(State state)
+        : m_state(state)
+    {
+        ASSERT(m_state == NoInformation || m_state == TakesSlowPath || m_state == MakesCalls);
+    }
+    
+    PutByIdStatus(const PutByIdVariant& variant)
+        : m_state(Simple)
+    {
+        m_variants.append(variant);
+    }
+    
+    static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect);
+    
+    static PutByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid);
+
+#if ENABLE(JIT)
+    static PutByIdStatus computeForStubInfo(const ConcurrentJSLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid);
+#endif
+    
+    State state() const { return m_state; }
+    
+    bool isSet() const { return m_state != NoInformation; }
+    bool operator!() const { return m_state == NoInformation; }
+    bool isSimple() const { return m_state == Simple; }
+    bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; }
+    bool makesCalls() const;
+    
+    size_t numVariants() const { return m_variants.size(); }
+    const Vector<PutByIdVariant, 1>& variants() const { return m_variants; }
+    const PutByIdVariant& at(size_t index) const { return m_variants[index]; }
+    const PutByIdVariant& operator[](size_t index) const { return at(index); }
+    
+    void dump(PrintStream&) const;
+    
+private:
+#if ENABLE(DFG_JIT)
+    static bool hasExitSite(const ConcurrentJSLocker&, CodeBlock*, unsigned bytecodeIndex);
+#endif
+#if ENABLE(JIT)
+    static PutByIdStatus computeForStubInfo(
+        const ConcurrentJSLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid,
+        CallLinkStatus::ExitSiteData);
+#endif
+    static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid);
+    
+    bool appendVariant(const PutByIdVariant&);
+    
+    State m_state;
+    Vector<PutByIdVariant, 1> m_variants;
+};
+
+} // namespace JSC
diff --git a/bytecode/PutByIdVariant.cpp b/bytecode/PutByIdVariant.cpp
new file mode 100644
index 0000000..9904c62
--- /dev/null
+++ b/bytecode/PutByIdVariant.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "PutByIdVariant.h"
+
+#include "CallLinkStatus.h"
+#include "JSCInlines.h"
+#include 
+
+namespace JSC {
+
+PutByIdVariant::PutByIdVariant(const PutByIdVariant& other)
+    : PutByIdVariant()
+{
+    *this = other;
+}
+
+PutByIdVariant& PutByIdVariant::operator=(const PutByIdVariant& other)
+{
+    m_kind = other.m_kind;
+    m_oldStructure = other.m_oldStructure;
+    m_newStructure = other.m_newStructure;
+    m_conditionSet = other.m_conditionSet;
+    m_offset = other.m_offset;
+    m_requiredType = other.m_requiredType;
+    if (other.m_callLinkStatus)
+        m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus);
+    else
+        m_callLinkStatus = nullptr;
+    return *this;
+}
+
+PutByIdVariant PutByIdVariant::replace(
+    const StructureSet& structure, PropertyOffset offset, const InferredType::Descriptor& requiredType)
+{
+    PutByIdVariant result;
+    result.m_kind = Replace;
+    result.m_oldStructure = structure;
+    result.m_offset = offset;
+    result.m_requiredType = requiredType;
+    return result;
+}
+
+PutByIdVariant PutByIdVariant::transition(
+    const StructureSet& oldStructure, Structure* newStructure,
+    const ObjectPropertyConditionSet& conditionSet, PropertyOffset offset,
+    const InferredType::Descriptor& requiredType)
+{
+    PutByIdVariant result;
+    result.m_kind = Transition;
+    result.m_oldStructure = oldStructure;
+    result.m_newStructure = newStructure;
+    result.m_conditionSet = conditionSet;
+    result.m_offset = offset;
+    result.m_requiredType = requiredType;
+    return result;
+}
+
+PutByIdVariant PutByIdVariant::setter(
+    const StructureSet& structure, PropertyOffset offset,
+    const ObjectPropertyConditionSet& conditionSet,
+    std::unique_ptr<CallLinkStatus> callLinkStatus)
+{
+    PutByIdVariant result;
+    result.m_kind = Setter;
+    result.m_oldStructure = structure;
+    result.m_conditionSet = conditionSet;
+    result.m_offset = offset;
+    result.m_callLinkStatus = WTFMove(callLinkStatus);
+    result.m_requiredType = InferredType::Top;
+    return result;
+}
+
+Structure* PutByIdVariant::oldStructureForTransition() const
+{
+    ASSERT(kind() == Transition);
+    ASSERT(m_oldStructure.size() <= 2);
+    for (unsigned i = m_oldStructure.size(); i--;) {
+        Structure* structure = m_oldStructure[i];
+        if (structure != m_newStructure)
+            return structure;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+
+    return nullptr;
+}
+
+bool PutByIdVariant::writesStructures() const
+{
+    switch (kind()) {
+    case Transition:
+    case Setter:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool PutByIdVariant::reallocatesStorage() const
+{
+    switch (kind()) {
+    case Transition:
+        return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity();
+    case Setter:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool PutByIdVariant::makesCalls() const
+{
+    return kind() == Setter;
+}
+
+bool PutByIdVariant::attemptToMerge(const PutByIdVariant& other)
+{
+    if (m_offset != other.m_offset)
+        return false;
+
+    if (m_requiredType != other.m_requiredType)
+        return false;
+    
+    switch (m_kind) {
+    case Replace: {
+        switch (other.m_kind) {
+        case Replace: {
+            ASSERT(m_conditionSet.isEmpty());
+            ASSERT(other.m_conditionSet.isEmpty());
+            
+            m_oldStructure.merge(other.m_oldStructure);
+            return true;
+        }
+            
+        case Transition: {
+            PutByIdVariant newVariant = other;
+            if (newVariant.attemptToMergeTransitionWithReplace(*this)) {
+                *this = newVariant;
+                return true;
+            }
+            return false;
+        }
+            
+        default:
+            return false;
+        }
+    }
+        
+    case Transition:
+        switch (other.m_kind) {
+        case Replace:
+            return attemptToMergeTransitionWithReplace(other);
+            
+        default:
+            return false;
+        }
+        
+    default:
+        return false;
+    }
+}
+
+bool PutByIdVariant::attemptToMergeTransitionWithReplace(const PutByIdVariant& replace)
+{
+    ASSERT(m_kind == Transition);
+    ASSERT(replace.m_kind == Replace);
+    ASSERT(m_offset == replace.m_offset);
+    ASSERT(!replace.writesStructures());
+    ASSERT(!replace.reallocatesStorage());
+    ASSERT(replace.conditionSet().isEmpty());
+    
+    // This sort of merging only works when we have one path along which we add a new field which
+    // transitions to structure S while the other path was already on structure S. This doesn't
+    // work if we need to reallocate anything or if the replace path is polymorphic.
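+    //
+    // Concretely (illustrative): merging Transition({S1} -> S2, offset o) with
+    // Replace({S2}, offset o) yields Transition({S1, S2} -> S2, offset o), since a base
+    // already in S2 simply keeps its structure while the store goes to the same offset.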
+    
+    if (reallocatesStorage())
+        return false;
+    
+    if (replace.m_oldStructure.onlyStructure() != m_newStructure)
+        return false;
+    
+    m_oldStructure.merge(m_newStructure);
+    return true;
+}
+
+void PutByIdVariant::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+
+void PutByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    switch (kind()) {
+    case NotSet:
+        out.print("");
+        return;
+        
+    case Replace:
+        out.print(
+            "<Replace: ", inContext(structure(), context), ", offset = ", offset(), ", ",
+            inContext(requiredType(), context), ">");
+        return;
+        
+    case Transition:
+        out.print(
+            " ",
+            pointerDumpInContext(newStructure(), context), ", [",
+            inContext(m_conditionSet, context), "], offset = ", offset(), ", ",
+            inContext(requiredType(), context), ">");
+        return;
+        
+    case Setter:
+        out.print(
+            "");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/bytecode/PutByIdVariant.h b/bytecode/PutByIdVariant.h
new file mode 100644
index 0000000..bda17bb
--- /dev/null
+++ b/bytecode/PutByIdVariant.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+#include "StructureSet.h"
+
+namespace JSC {
+
+class CallLinkStatus;
+
+class PutByIdVariant {
+public:
+    enum Kind {
+        NotSet,
+        Replace,
+        Transition,
+        Setter
+    };
+    
+    PutByIdVariant()
+        : m_kind(NotSet)
+        , m_newStructure(nullptr)
+        , m_offset(invalidOffset)
+    {
+    }
+    
+    PutByIdVariant(const PutByIdVariant&);
+    PutByIdVariant& operator=(const PutByIdVariant&);
+
+    static PutByIdVariant replace(const StructureSet&, PropertyOffset, const InferredType::Descriptor&);
+    
+    static PutByIdVariant transition(
+        const StructureSet& oldStructure, Structure* newStructure,
+        const ObjectPropertyConditionSet&, PropertyOffset, const InferredType::Descriptor&);
+    
+    static PutByIdVariant setter(
+        const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&,
+        std::unique_ptr<CallLinkStatus>);
+    
+    Kind kind() const { return m_kind; }
+    
+    bool isSet() const { return kind() != NotSet; }
+    bool operator!() const { return !isSet(); }
+    
+    const StructureSet& structure() const
+    {
+        ASSERT(kind() == Replace || kind() == Setter);
+        return m_oldStructure;
+    }
+    
+    const StructureSet& structureSet() const
+    {
+        return structure();
+    }
+    
+    const StructureSet& oldStructure() const
+    {
+        ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+        return m_oldStructure;
+    }
+    
+    StructureSet& oldStructure()
+    {
+        ASSERT(kind() == Transition || kind() == Replace || kind() == Setter);
+        return m_oldStructure;
+    }
+    
+    Structure* oldStructureForTransition() const;
+    
+    Structure* newStructure() const
+    {
+        ASSERT(kind() == Transition);
+        return m_newStructure;
+    }
+
+    InferredType::Descriptor requiredType() const
+    {
+        return m_requiredType;
+    }
+
+    bool writesStructures() const;
+    bool reallocatesStorage() const;
+    bool makesCalls() const;
+    
+    const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; }
+    
+    // We don't support intrinsics for Setters (it would be sweet if we did) but we need this for templated helpers.
+    Intrinsic intrinsic() const { return NoIntrinsic; }
+
+    // This is needed for templated helpers.
+    bool isPropertyUnset() const { return false; }
+
+    PropertyOffset offset() const
+    {
+        ASSERT(isSet());
+        return m_offset;
+    }
+    
+    CallLinkStatus* callLinkStatus() const
+    {
+        ASSERT(kind() == Setter);
+        return m_callLinkStatus.get();
+    }
+
+    bool attemptToMerge(const PutByIdVariant& other);
+    
+    void dump(PrintStream&) const;
+    void dumpInContext(PrintStream&, DumpContext*) const;
+
+private:
+    bool attemptToMergeTransitionWithReplace(const PutByIdVariant& replace);
+    
+    Kind m_kind;
+    StructureSet m_oldStructure;
+    Structure* m_newStructure;
+    ObjectPropertyConditionSet m_conditionSet;
+    PropertyOffset m_offset;
+    InferredType::Descriptor m_requiredType;
+    std::unique_ptr<CallLinkStatus> m_callLinkStatus;
+};
+
+} // namespace JSC
diff --git a/bytecode/PutKind.h b/bytecode/PutKind.h
new file mode 100644
index 0000000..611279f
--- /dev/null
+++ b/bytecode/PutKind.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum PutKind { Direct, NotDirect };
+
+} // namespace JSC
diff --git a/bytecode/ReduceWhitespace.cpp b/bytecode/ReduceWhitespace.cpp
new file mode 100644
index 0000000..d1f25b0
--- /dev/null
+++ b/bytecode/ReduceWhitespace.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ReduceWhitespace.h"
+
+#include 
+#include 
+
+namespace JSC {
+
+CString reduceWhitespace(const CString& input)
+{
+    StringPrintStream out;
+    
+    const char* data = input.data();
+    
+    for (unsigned i = 0; i < input.length();) {
+        if (isASCIISpace(data[i])) {
+            while (i < input.length() && isASCIISpace(data[i]))
+                ++i;
+            out.print(CharacterDump(' '));
+            continue;
+        }
+        out.print(CharacterDump(data[i]));
+        ++i;
+    }
+    
+    return out.toCString();
+}
+
+} // namespace JSC
diff --git a/bytecode/ReduceWhitespace.h b/bytecode/ReduceWhitespace.h
new file mode 100644
index 0000000..fcb86c0
--- /dev/null
+++ b/bytecode/ReduceWhitespace.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include 
+
+namespace JSC {
+
+// Replace all whitespace runs with a single space.
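+// For example (illustrative): reduceWhitespace("foo \t\n  bar") yields "foo bar".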
+CString reduceWhitespace(const CString&);
+
+} // namespace JSC
diff --git a/bytecode/SpecialPointer.cpp b/bytecode/SpecialPointer.cpp
new file mode 100644
index 0000000..dc5a363
--- /dev/null
+++ b/bytecode/SpecialPointer.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "SpecialPointer.h"
+
+#include "CodeBlock.h"
+#include "JSGlobalObject.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer)
+{
+    return globalObject->actualPointerFor(pointer);
+}
+
+void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer)
+{
+    return actualPointerFor(codeBlock->globalObject(), pointer);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/SpecialPointer.h b/bytecode/SpecialPointer.h
new file mode 100644
index 0000000..21329ec
--- /dev/null
+++ b/bytecode/SpecialPointer.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+class CodeBlock;
+class JSGlobalObject;
+
+namespace Special {
+enum Pointer {
+    CallFunction,
+    ApplyFunction,
+    ObjectConstructor,
+    ArrayConstructor,
+    TableSize // Not a real special pointer. Use this to determine the number of pointers.
+};
+} // namespace Special
+
+enum class LinkTimeConstant {
+    ThrowTypeErrorFunction,
+};
+const unsigned LinkTimeConstantCount = 1;
+
+inline bool pointerIsFunction(Special::Pointer pointer)
+{
+    ASSERT_UNUSED(pointer, pointer < Special::TableSize);
+    return true;
+}
+
+inline bool pointerIsCell(Special::Pointer pointer)
+{
+    ASSERT_UNUSED(pointer, pointer < Special::TableSize);
+    return true;
+}
+
+void* actualPointerFor(JSGlobalObject*, Special::Pointer);
+void* actualPointerFor(CodeBlock*, Special::Pointer);
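+
+// One possible use (illustrative, not required by this header): a compiler phase that wants
+// to know whether a callee is the canonical Function.prototype.apply can compare the callee
+// cell against actualPointerFor(codeBlock, Special::ApplyFunction).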
+
+} // namespace JSC
diff --git a/bytecode/SpeculatedType.cpp b/bytecode/SpeculatedType.cpp
new file mode 100644
index 0000000..2425292
--- /dev/null
+++ b/bytecode/SpeculatedType.cpp
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2011-2013, 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SpeculatedType.h"
+
+#include "DirectArguments.h"
+#include "JSArray.h"
+#include "JSCInlines.h"
+#include "JSFunction.h"
+#include "JSMap.h"
+#include "JSSet.h"
+#include "ProxyObject.h"
+#include "RegExpObject.h"
+#include "ScopedArguments.h"
+#include "StringObject.h"
+#include "ValueProfile.h"
+#include <wtf/StringPrintStream.h>
+
+namespace JSC {
+
+void dumpSpeculation(PrintStream& out, SpeculatedType value)
+{
+    if (value == SpecNone) {
+        out.print("None");
+        return;
+    }
+    
+    StringPrintStream myOut;
+    
+    bool isTop = true;
+    
+    if ((value & SpecCell) == SpecCell)
+        myOut.print("Cell");
+    else {
+        if ((value & SpecObject) == SpecObject)
+            myOut.print("Object");
+        else {
+            if (value & SpecCellOther)
+                myOut.print("Othercell");
+            else
+                isTop = false;
+    
+            if (value & SpecObjectOther)
+                myOut.print("Otherobj");
+            else
+                isTop = false;
+    
+            if (value & SpecFinalObject)
+                myOut.print("Final");
+            else
+                isTop = false;
+
+            if (value & SpecArray)
+                myOut.print("Array");
+            else
+                isTop = false;
+    
+            if (value & SpecInt8Array)
+                myOut.print("Int8array");
+            else
+                isTop = false;
+    
+            if (value & SpecInt16Array)
+                myOut.print("Int16array");
+            else
+                isTop = false;
+    
+            if (value & SpecInt32Array)
+                myOut.print("Int32array");
+            else
+                isTop = false;
+    
+            if (value & SpecUint8Array)
+                myOut.print("Uint8array");
+            else
+                isTop = false;
+
+            if (value & SpecUint8ClampedArray)
+                myOut.print("Uint8clampedarray");
+            else
+                isTop = false;
+    
+            if (value & SpecUint16Array)
+                myOut.print("Uint16array");
+            else
+                isTop = false;
+    
+            if (value & SpecUint32Array)
+                myOut.print("Uint32array");
+            else
+                isTop = false;
+    
+            if (value & SpecFloat32Array)
+                myOut.print("Float32array");
+            else
+                isTop = false;
+    
+            if (value & SpecFloat64Array)
+                myOut.print("Float64array");
+            else
+                isTop = false;
+    
+            if (value & SpecFunction)
+                myOut.print("Function");
+            else
+                isTop = false;
+    
+            if (value & SpecDirectArguments)
+                myOut.print("Directarguments");
+            else
+                isTop = false;
+    
+            if (value & SpecScopedArguments)
+                myOut.print("Scopedarguments");
+            else
+                isTop = false;
+    
+            if (value & SpecStringObject)
+                myOut.print("Stringobject");
+            else
+                isTop = false;
+    
+            if (value & SpecRegExpObject)
+                myOut.print("Regexpobject");
+            else
+                isTop = false;
+
+            if (value & SpecMapObject)
+                myOut.print("Mapobject");
+            else
+                isTop = false;
+
+            if (value & SpecSetObject)
+                myOut.print("Setobject");
+            else
+                isTop = false;
+
+            if (value & SpecProxyObject)
+                myOut.print("Proxyobject");
+            else
+                isTop = false;
+
+            if (value & SpecDerivedArray)
+                myOut.print("Derivedarray");
+            else
+                isTop = false;
+        }
+
+        if ((value & SpecString) == SpecString)
+            myOut.print("String");
+        else {
+            if (value & SpecStringIdent)
+                myOut.print("Stringident");
+            else
+                isTop = false;
+            
+            if (value & SpecStringVar)
+                myOut.print("Stringvar");
+            else
+                isTop = false;
+        }
+
+        if (value & SpecSymbol)
+            myOut.print("Symbol");
+        else
+            isTop = false;
+    }
+    
+    if (value == SpecInt32Only)
+        myOut.print("Int32");
+    else {
+        if (value & SpecBoolInt32)
+            myOut.print("Boolint32");
+        else
+            isTop = false;
+        
+        if (value & SpecNonBoolInt32)
+            myOut.print("Nonboolint32");
+        else
+            isTop = false;
+    }
+    
+    if (value & SpecInt52Only)
+        myOut.print("Int52");
+        
+    if ((value & SpecBytecodeDouble) == SpecBytecodeDouble)
+        myOut.print("Bytecodedouble");
+    else {
+        if (value & SpecAnyIntAsDouble)
+            myOut.print("AnyIntAsDouble");
+        else
+            isTop = false;
+        
+        if (value & SpecNonIntAsDouble)
+            myOut.print("Nonintasdouble");
+        else
+            isTop = false;
+        
+        if (value & SpecDoublePureNaN)
+            myOut.print("Doublepurenan");
+        else
+            isTop = false;
+    }
+    
+    if (value & SpecDoubleImpureNaN)
+        myOut.print("Doubleimpurenan");
+    
+    if (value & SpecBoolean)
+        myOut.print("Bool");
+    else
+        isTop = false;
+    
+    if (value & SpecOther)
+        myOut.print("Other");
+    else
+        isTop = false;
+    
+    if (isTop)
+        out.print("Top");
+    else
+        out.print(myOut.toCString());
+    
+    if (value & SpecEmpty)
+        out.print("Empty");
+}
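+
+// For example, given the masks in SpeculatedType.h, dumping SpecInt32Only | SpecBoolean
+// prints "Boolint32Nonboolint32Bool", while dumping SpecHeapTop prints "Top" because every
+// branch above contributes and isTop is never cleared.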
+
+// We don't expose this because we don't want anyone relying on the fact that this method currently
+// just returns string constants.
+static const char* speculationToAbbreviatedString(SpeculatedType prediction)
+{
+    if (isFinalObjectSpeculation(prediction))
+        return "";
+    if (isArraySpeculation(prediction))
+        return "";
+    if (isStringIdentSpeculation(prediction))
+        return "";
+    if (isStringSpeculation(prediction))
+        return "";
+    if (isFunctionSpeculation(prediction))
+        return "";
+    if (isInt8ArraySpeculation(prediction))
+        return "";
+    if (isInt16ArraySpeculation(prediction))
+        return "";
+    if (isInt32ArraySpeculation(prediction))
+        return "";
+    if (isUint8ArraySpeculation(prediction))
+        return "";
+    if (isUint16ArraySpeculation(prediction))
+        return "";
+    if (isUint32ArraySpeculation(prediction))
+        return "";
+    if (isFloat32ArraySpeculation(prediction))
+        return "";
+    if (isFloat64ArraySpeculation(prediction))
+        return "";
+    if (isDirectArgumentsSpeculation(prediction))
+        return "";
+    if (isScopedArgumentsSpeculation(prediction))
+        return "";
+    if (isStringObjectSpeculation(prediction))
+        return "";
+    if (isRegExpObjectSpeculation(prediction))
+        return "";
+    if (isStringOrStringObjectSpeculation(prediction))
+        return "";
+    if (isObjectSpeculation(prediction))
+        return "";
+    if (isCellSpeculation(prediction))
+        return "";
+    if (isBoolInt32Speculation(prediction))
+        return "";
+    if (isInt32Speculation(prediction))
+        return "";
+    if (isAnyIntAsDoubleSpeculation(prediction))
+        return "";
+    if (isInt52Speculation(prediction))
+        return "";
+    if (isAnyIntSpeculation(prediction))
+        return "";
+    if (isDoubleSpeculation(prediction))
+        return "";
+    if (isFullNumberSpeculation(prediction))
+        return "";
+    if (isBooleanSpeculation(prediction))
+        return "";
+    if (isOtherSpeculation(prediction))
+        return "";
+    if (isMiscSpeculation(prediction))
+        return "";
+    return "";
+}
+
+void dumpSpeculationAbbreviated(PrintStream& out, SpeculatedType value)
+{
+    out.print(speculationToAbbreviatedString(value));
+}
+
+SpeculatedType speculationFromTypedArrayType(TypedArrayType type)
+{
+    switch (type) {
+    case TypeInt8:
+        return SpecInt8Array;
+    case TypeInt16:
+        return SpecInt16Array;
+    case TypeInt32:
+        return SpecInt32Array;
+    case TypeUint8:
+        return SpecUint8Array;
+    case TypeUint8Clamped:
+        return SpecUint8ClampedArray;
+    case TypeUint16:
+        return SpecUint16Array;
+    case TypeUint32:
+        return SpecUint32Array;
+    case TypeFloat32:
+        return SpecFloat32Array;
+    case TypeFloat64:
+        return SpecFloat64Array;
+    case NotTypedArray:
+    case TypeDataView:
+        break;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+    return SpecNone;
+}
+
+SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo)
+{
+    if (classInfo == JSString::info())
+        return SpecString;
+
+    if (classInfo == Symbol::info())
+        return SpecSymbol;
+
+    if (classInfo == JSFinalObject::info())
+        return SpecFinalObject;
+    
+    if (classInfo == JSArray::info())
+        return SpecArray;
+    
+    if (classInfo == DirectArguments::info())
+        return SpecDirectArguments;
+    
+    if (classInfo == ScopedArguments::info())
+        return SpecScopedArguments;
+    
+    if (classInfo == StringObject::info())
+        return SpecStringObject;
+
+    if (classInfo == RegExpObject::info())
+        return SpecRegExpObject;
+
+    if (classInfo == JSMap::info())
+        return SpecMapObject;
+
+    if (classInfo == JSSet::info())
+        return SpecSetObject;
+
+    if (classInfo == ProxyObject::info())
+        return SpecProxyObject;
+    
+    if (classInfo->isSubClassOf(JSFunction::info()))
+        return SpecFunction;
+    
+    if (isTypedView(classInfo->typedArrayStorageType))
+        return speculationFromTypedArrayType(classInfo->typedArrayStorageType);
+
+    if (classInfo->isSubClassOf(JSArray::info()))
+        return SpecDerivedArray;
+    
+    if (classInfo->isSubClassOf(JSObject::info()))
+        return SpecObjectOther;
+    
+    return SpecCellOther;
+}
+
+SpeculatedType speculationFromStructure(Structure* structure)
+{
+    if (structure->typeInfo().type() == StringType)
+        return SpecString;
+    if (structure->typeInfo().type() == SymbolType)
+        return SpecSymbol;
+    if (structure->typeInfo().type() == DerivedArrayType)
+        return SpecDerivedArray;
+    return speculationFromClassInfo(structure->classInfo());
+}
+
+SpeculatedType speculationFromCell(JSCell* cell)
+{
+    if (JSString* string = jsDynamicCast<JSString*>(cell)) {
+        if (const StringImpl* impl = string->tryGetValueImpl()) {
+            if (impl->isAtomic())
+                return SpecStringIdent;
+        }
+        return SpecStringVar;
+    }
+    return speculationFromStructure(cell->structure());
+}
+
+SpeculatedType speculationFromValue(JSValue value)
+{
+    if (value.isEmpty())
+        return SpecEmpty;
+    if (value.isInt32()) {
+        if (value.asInt32() & ~1)
+            return SpecNonBoolInt32;
+        return SpecBoolInt32;
+    }
+    if (value.isDouble()) {
+        double number = value.asNumber();
+        if (number != number)
+            return SpecDoublePureNaN;
+        if (value.isAnyInt())
+            return SpecAnyIntAsDouble;
+        return SpecNonIntAsDouble;
+    }
+    if (value.isCell())
+        return speculationFromCell(value.asCell());
+    if (value.isBoolean())
+        return SpecBoolean;
+    ASSERT(value.isUndefinedOrNull());
+    return SpecOther;
+}
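+
+// A few classifications implied by the rules above, using the usual jsNumber/jsBoolean/jsNull
+// helpers: jsNumber(1) -> SpecBoolInt32, jsNumber(2) -> SpecNonBoolInt32,
+// jsNumber(0.5) -> SpecNonIntAsDouble, a NaN-valued double -> SpecDoublePureNaN,
+// jsBoolean(true) -> SpecBoolean, jsNull() -> SpecOther.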
+
+TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType type)
+{
+    if (isInt8ArraySpeculation(type))
+        return TypeInt8;
+        
+    if (isInt16ArraySpeculation(type))
+        return TypeInt16;
+        
+    if (isInt32ArraySpeculation(type))
+        return TypeInt32;
+        
+    if (isUint8ArraySpeculation(type))
+        return TypeUint8;
+        
+    if (isUint8ClampedArraySpeculation(type))
+        return TypeUint8Clamped;
+        
+    if (isUint16ArraySpeculation(type))
+        return TypeUint16;
+        
+    if (isUint32ArraySpeculation(type))
+        return TypeUint32;
+        
+    if (isFloat32ArraySpeculation(type))
+        return TypeFloat32;
+        
+    if (isFloat64ArraySpeculation(type))
+        return TypeFloat64;
+    
+    return NotTypedArray;
+}
+
+SpeculatedType speculationFromJSType(JSType type)
+{
+    switch (type) {
+    case StringType:
+        return SpecString;
+    case SymbolType:
+        return SpecSymbol;
+    case ArrayType:
+        return SpecArray;
+    case DerivedArrayType:
+        return SpecDerivedArray;
+    case RegExpObjectType:
+        return SpecRegExpObject;
+    case ProxyObjectType:
+        return SpecProxyObject;
+    case JSMapType:
+        return SpecMapObject;
+    case JSSetType:
+        return SpecSetObject;
+    default:
+        ASSERT_NOT_REACHED();
+    }
+    return SpecNone;
+}
+
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType type)
+{
+    if (type & (SpecAnyInt | SpecAnyIntAsDouble))
+        type |= (SpecAnyInt | SpecAnyIntAsDouble);
+    if (type & SpecString)
+        type |= SpecString;
+    return type;
+}
+
+bool valuesCouldBeEqual(SpeculatedType a, SpeculatedType b)
+{
+    a = leastUpperBoundOfStrictlyEquivalentSpeculations(a);
+    b = leastUpperBoundOfStrictlyEquivalentSpeculations(b);
+    
+    // Anything could be equal to a string.
+    if (a & SpecString)
+        return true;
+    if (b & SpecString)
+        return true;
+    
+    // If both sides are definitely only objects, then equality is fairly sane.
+    if (isObjectSpeculation(a) && isObjectSpeculation(b))
+        return !!(a & b);
+    
+    // If either side could be an object or not, then we could call toString or
+    // valueOf, which could return anything.
+    if (a & SpecObject)
+        return true;
+    if (b & SpecObject)
+        return true;
+    
+    // Neither side is an object or string, so the world is relatively sane.
+    return !!(a & b);
+}
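+
+// For example, valuesCouldBeEqual(SpecNonBoolInt32, SpecAnyIntAsDouble) is true because the
+// least-upper-bound step above unifies the two integer encodings (2 == 2.0), and
+// valuesCouldBeEqual(SpecFinalObject, SpecInt32Only) is true because the object's toString or
+// valueOf could produce anything.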
+
+SpeculatedType typeOfDoubleSum(SpeculatedType a, SpeculatedType b)
+{
+    SpeculatedType result = a | b;
+    // Impure NaN could become pure NaN during addition because addition may clear bits.
+    if (result & SpecDoubleImpureNaN)
+        result |= SpecDoublePureNaN;
+    // Values could overflow, or fractions could become integers.
+    if (result & SpecDoubleReal)
+        result |= SpecDoubleReal;
+    return result;
+}
+
+SpeculatedType typeOfDoubleDifference(SpeculatedType a, SpeculatedType b)
+{
+    return typeOfDoubleSum(a, b);
+}
+
+SpeculatedType typeOfDoubleProduct(SpeculatedType a, SpeculatedType b)
+{
+    return typeOfDoubleSum(a, b);
+}
+
+static SpeculatedType polluteDouble(SpeculatedType value)
+{
+    // Impure NaN could become pure NaN because the operation could clear some bits.
+    if (value & SpecDoubleImpureNaN)
+        value |= SpecDoubleNaN;
+    // Values could overflow, fractions could become integers, or an error could produce
+    // PureNaN.
+    if (value & SpecDoubleReal)
+        value |= SpecDoubleReal | SpecDoublePureNaN;
+    return value;
+}
+
+SpeculatedType typeOfDoubleQuotient(SpeculatedType a, SpeculatedType b)
+{
+    return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleMinMax(SpeculatedType a, SpeculatedType b)
+{
+    SpeculatedType result = a | b;
+    // Impure NaN could become pure NaN, since the operation may clear bits.
+    if (result & SpecDoubleImpureNaN)
+        result |= SpecDoublePureNaN;
+    return result;
+}
+
+SpeculatedType typeOfDoubleNegation(SpeculatedType value)
+{
+    // Changing bits can make pure NaN impure and vice versa:
+    // 0xefff000000000000 (pure) - 0xffff000000000000 (impure)
+    if (value & SpecDoubleNaN)
+        value |= SpecDoubleNaN;
+    // We could get negative zero, which mixes SpecAnyIntAsDouble and SpecNonIntAsDouble.
+    // We could also overflow a large negative int into something that is no longer
+    // representable as an int.
+    if (value & SpecDoubleReal)
+        value |= SpecDoubleReal;
+    return value;
+}
+
+SpeculatedType typeOfDoubleAbs(SpeculatedType value)
+{
+    return typeOfDoubleNegation(value);
+}
+
+SpeculatedType typeOfDoubleRounding(SpeculatedType value)
+{
+    // A pure double NaN can become impure when converted back from a float,
+    // and vice versa.
+    if (value & SpecDoubleNaN)
+        value |= SpecDoubleNaN;
+    // We might lose bits, which leads to a value becoming integer-representable.
+    if (value & SpecNonIntAsDouble)
+        value |= SpecAnyIntAsDouble;
+    return value;
+}
+
+SpeculatedType typeOfDoublePow(SpeculatedType xValue, SpeculatedType yValue)
+{
+    // Math.pow() always returns NaN if the exponent is NaN, unlike std::pow().
+    // We always set a pure NaN in that case.
+    if (yValue & SpecDoubleNaN)
+        xValue |= SpecDoublePureNaN;
+    return polluteDouble(xValue);
+}
+
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType a, SpeculatedType b)
+{
+    return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType value)
+{
+    return polluteDouble(value);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/SpeculatedType.h b/bytecode/SpeculatedType.h
new file mode 100644
index 0000000..e23fd2c
--- /dev/null
+++ b/bytecode/SpeculatedType.h
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "TypedArrayType.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class Structure;
+
+typedef uint64_t SpeculatedType;
+static const SpeculatedType SpecNone               = 0; // We don't know anything yet.
+static const SpeculatedType SpecFinalObject        = 1ull << 0; // It's definitely a JSFinalObject.
+static const SpeculatedType SpecArray              = 1ull << 1; // It's definitely a JSArray.
+static const SpeculatedType SpecFunction           = 1ull << 2; // It's definitely a JSFunction.
+static const SpeculatedType SpecInt8Array          = 1ull << 3; // It's definitely an Int8Array or one of its subclasses.
+static const SpeculatedType SpecInt16Array         = 1ull << 4; // It's definitely an Int16Array or one of its subclasses.
+static const SpeculatedType SpecInt32Array         = 1ull << 5; // It's definitely an Int32Array or one of its subclasses.
+static const SpeculatedType SpecUint8Array         = 1ull << 6; // It's definitely an Uint8Array or one of its subclasses.
+static const SpeculatedType SpecUint8ClampedArray  = 1ull << 7; // It's definitely an Uint8ClampedArray or one of its subclasses.
+static const SpeculatedType SpecUint16Array        = 1ull << 8; // It's definitely an Uint16Array or one of its subclasses.
+static const SpeculatedType SpecUint32Array        = 1ull << 9; // It's definitely an Uint32Array or one of its subclasses.
+static const SpeculatedType SpecFloat32Array       = 1ull << 10; // It's definitely a Float32Array or one of its subclasses.
+static const SpeculatedType SpecFloat64Array       = 1ull << 11; // It's definitely a Float64Array or one of its subclasses.
+static const SpeculatedType SpecTypedArrayView     = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array;
+static const SpeculatedType SpecDirectArguments    = 1ull << 12; // It's definitely a DirectArguments object.
+static const SpeculatedType SpecScopedArguments    = 1ull << 13; // It's definitely a ScopedArguments object.
+static const SpeculatedType SpecStringObject       = 1ull << 14; // It's definitely a StringObject.
+static const SpeculatedType SpecRegExpObject       = 1ull << 15; // It's definitely a RegExpObject (and not any subclass of RegExpObject).
+static const SpeculatedType SpecMapObject          = 1ull << 16; // It's definitely a Map object or one of its subclasses.
+static const SpeculatedType SpecSetObject          = 1ull << 17; // It's definitely a Set object or one of its subclasses.
+static const SpeculatedType SpecProxyObject        = 1ull << 18; // It's definitely a Proxy object or one of its subclasses.
+static const SpeculatedType SpecDerivedArray       = 1ull << 19; // It's definitely a DerivedArray object.
+static const SpeculatedType SpecObjectOther        = 1ull << 20; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
+static const SpeculatedType SpecObject             = SpecFinalObject | SpecArray | SpecFunction | SpecTypedArrayView | SpecDirectArguments | SpecScopedArguments | SpecStringObject | SpecRegExpObject | SpecMapObject | SpecSetObject | SpecProxyObject | SpecDerivedArray | SpecObjectOther; // Bitmask used for testing for any kind of object prediction.
+static const SpeculatedType SpecStringIdent        = 1ull << 21; // It's definitely a JSString, and it's an identifier.
+static const SpeculatedType SpecStringVar          = 1ull << 22; // It's definitely a JSString, and it's not an identifier.
+static const SpeculatedType SpecString             = SpecStringIdent | SpecStringVar; // It's definitely a JSString.
+static const SpeculatedType SpecSymbol             = 1ull << 23; // It's definitely a Symbol.
+static const SpeculatedType SpecCellOther          = 1ull << 24; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString or a Symbol. FIXME: This shouldn't be part of heap-top or bytecode-top. https://bugs.webkit.org/show_bug.cgi?id=133078
+static const SpeculatedType SpecCell               = SpecObject | SpecString | SpecSymbol | SpecCellOther; // It's definitely a JSCell.
+static const SpeculatedType SpecBoolInt32          = 1ull << 25; // It's definitely an Int32 with value 0 or 1.
+static const SpeculatedType SpecNonBoolInt32       = 1ull << 26; // It's definitely an Int32 with value other than 0 or 1.
+static const SpeculatedType SpecInt32Only          = SpecBoolInt32 | SpecNonBoolInt32; // It's definitely an Int32.
+static const SpeculatedType SpecInt52Only          = 1ull << 27; // It's definitely an Int52 and we intend to unbox it. It's also definitely not an Int32.
+static const SpeculatedType SpecAnyInt             = SpecInt32Only | SpecInt52Only; // It's something that we can do machine int arithmetic on.
+static const SpeculatedType SpecAnyIntAsDouble     = 1ull << 28; // It's definitely an Int52 and it's inside a double.
+static const SpeculatedType SpecNonIntAsDouble     = 1ull << 29; // It's definitely not an Int52 but it's a real number and it's a double.
+static const SpeculatedType SpecDoubleReal         = SpecNonIntAsDouble | SpecAnyIntAsDouble; // It's definitely a non-NaN double.
+static const SpeculatedType SpecDoublePureNaN      = 1ull << 30; // It's definitely a NaN that is safe to tag (i.e. pure).
+static const SpeculatedType SpecDoubleImpureNaN    = 1ull << 31; // It's definitely a NaN that is unsafe to tag (i.e. impure).
+static const SpeculatedType SpecDoubleNaN          = SpecDoublePureNaN | SpecDoubleImpureNaN; // It's definitely some kind of NaN.
+static const SpeculatedType SpecBytecodeDouble     = SpecDoubleReal | SpecDoublePureNaN; // It's either a non-NaN or a NaN double, but it's definitely not impure NaN.
+static const SpeculatedType SpecFullDouble         = SpecDoubleReal | SpecDoubleNaN; // It's either a non-NaN or a NaN double.
+static const SpeculatedType SpecBytecodeRealNumber = SpecInt32Only | SpecDoubleReal; // It's either an Int32 or a DoubleReal.
+static const SpeculatedType SpecFullRealNumber     = SpecAnyInt | SpecDoubleReal; // It's either an Int32, an Int52, or a DoubleReal.
+static const SpeculatedType SpecBytecodeNumber     = SpecInt32Only | SpecBytecodeDouble; // It's either an Int32 or a Double, and the Double cannot be an impure NaN.
+static const SpeculatedType SpecFullNumber         = SpecAnyInt | SpecFullDouble; // It's either an Int32, Int52, or a Double, and the Double can be impure NaN.
+static const SpeculatedType SpecBoolean            = 1ull << 32; // It's definitely a Boolean.
+static const SpeculatedType SpecOther              = 1ull << 33; // It's definitely either Null or Undefined.
+static const SpeculatedType SpecMisc               = SpecBoolean | SpecOther; // It's definitely either a boolean, Null, or Undefined.
+static const SpeculatedType SpecHeapTop            = SpecCell | SpecBytecodeNumber | SpecMisc; // It can be any of the above, except for SpecInt52Only and SpecDoubleImpureNaN.
+static const SpeculatedType SpecPrimitive          = SpecString | SpecSymbol | SpecBytecodeNumber | SpecMisc; // It's any non-Object JSValue.
+static const SpeculatedType SpecEmpty              = 1ull << 34; // It's definitely an empty value marker.
+static const SpeculatedType SpecBytecodeTop        = SpecHeapTop | SpecEmpty; // It can be any of the above, except for SpecInt52Only and SpecDoubleImpureNaN. Corresponds to what could be found in a bytecode local.
+static const SpeculatedType SpecFullTop            = SpecBytecodeTop | SpecFullNumber; // It can be anything that bytecode could see plus exotic encodings of numbers.
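+
+// A few consequences of the definitions above, for illustration:
+//   (SpecBoolInt32 | SpecNonBoolInt32) == SpecInt32Only
+//   (SpecInt32Only | SpecBytecodeDouble) == SpecBytecodeNumber
+//   (SpecHeapTop & (SpecInt52Only | SpecDoubleImpureNaN | SpecEmpty)) == SpecNone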
+
+typedef bool (*SpeculatedTypeChecker)(SpeculatedType);
+
+// Dummy prediction checker, only useful if someone insists on requiring a prediction checker.
+inline bool isAnySpeculation(SpeculatedType)
+{
+    return true;
+}
+
+inline bool isCellSpeculation(SpeculatedType value)
+{
+    return !!(value & SpecCell) && !(value & ~SpecCell);
+}
+
+inline bool isCellOrOtherSpeculation(SpeculatedType value)
+{
+    return !!value && !(value & ~(SpecCell | SpecOther));
+}
+
+inline bool isNotCellSpeculation(SpeculatedType value)
+{
+    return !(value & SpecCell) && value;
+}
+
+inline bool isObjectSpeculation(SpeculatedType value)
+{
+    return !!(value & SpecObject) && !(value & ~SpecObject);
+}
+
+inline bool isObjectOrOtherSpeculation(SpeculatedType value)
+{
+    return !!(value & (SpecObject | SpecOther)) && !(value & ~(SpecObject | SpecOther));
+}
+
+inline bool isFinalObjectSpeculation(SpeculatedType value)
+{
+    return value == SpecFinalObject;
+}
+
+inline bool isFinalObjectOrOtherSpeculation(SpeculatedType value)
+{
+    return !!(value & (SpecFinalObject | SpecOther)) && !(value & ~(SpecFinalObject | SpecOther));
+}
+
+inline bool isStringIdentSpeculation(SpeculatedType value)
+{
+    return value == SpecStringIdent;
+}
+
+inline bool isNotStringVarSpeculation(SpeculatedType value)
+{
+    return !(value & SpecStringVar);
+}
+
+inline bool isStringSpeculation(SpeculatedType value)
+{
+    return !!value && (value & SpecString) == value;
+}
+
+inline bool isNotStringSpeculation(SpeculatedType value)
+{
+    return value && !(value & SpecString);
+}
+
+inline bool isStringOrOtherSpeculation(SpeculatedType value)
+{
+    return !!value && (value & (SpecString | SpecOther)) == value;
+}
+
+inline bool isSymbolSpeculation(SpeculatedType value)
+{
+    return value == SpecSymbol;
+}
+
+inline bool isArraySpeculation(SpeculatedType value)
+{
+    return value == SpecArray;
+}
+
+inline bool isFunctionSpeculation(SpeculatedType value)
+{
+    return value == SpecFunction;
+}
+
+inline bool isProxyObjectSpeculation(SpeculatedType value)
+{
+    return value == SpecProxyObject;
+}
+
+inline bool isDerivedArraySpeculation(SpeculatedType value)
+{
+    return value == SpecDerivedArray;
+}
+
+inline bool isInt8ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecInt8Array;
+}
+
+inline bool isInt16ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecInt16Array;
+}
+
+inline bool isInt32ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecInt32Array;
+}
+
+inline bool isUint8ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecUint8Array;
+}
+
+inline bool isUint8ClampedArraySpeculation(SpeculatedType value)
+{
+    return value == SpecUint8ClampedArray;
+}
+
+inline bool isUint16ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecUint16Array;
+}
+
+inline bool isUint32ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecUint32Array;
+}
+
+inline bool isFloat32ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecFloat32Array;
+}
+
+inline bool isFloat64ArraySpeculation(SpeculatedType value)
+{
+    return value == SpecFloat64Array;
+}
+
+inline bool isDirectArgumentsSpeculation(SpeculatedType value)
+{
+    return value == SpecDirectArguments;
+}
+
+inline bool isScopedArgumentsSpeculation(SpeculatedType value)
+{
+    return value == SpecScopedArguments;
+}
+
+inline bool isActionableIntMutableArraySpeculation(SpeculatedType value)
+{
+    return isInt8ArraySpeculation(value)
+        || isInt16ArraySpeculation(value)
+        || isInt32ArraySpeculation(value)
+        || isUint8ArraySpeculation(value)
+        || isUint8ClampedArraySpeculation(value)
+        || isUint16ArraySpeculation(value)
+        || isUint32ArraySpeculation(value);
+}
+
+inline bool isActionableFloatMutableArraySpeculation(SpeculatedType value)
+{
+    return isFloat32ArraySpeculation(value)
+        || isFloat64ArraySpeculation(value);
+}
+
+inline bool isActionableTypedMutableArraySpeculation(SpeculatedType value)
+{
+    return isActionableIntMutableArraySpeculation(value)
+        || isActionableFloatMutableArraySpeculation(value);
+}
+
+inline bool isActionableMutableArraySpeculation(SpeculatedType value)
+{
+    return isArraySpeculation(value)
+        || isActionableTypedMutableArraySpeculation(value);
+}
+
+inline bool isActionableArraySpeculation(SpeculatedType value)
+{
+    return isStringSpeculation(value)
+        || isDirectArgumentsSpeculation(value)
+        || isScopedArgumentsSpeculation(value)
+        || isActionableMutableArraySpeculation(value);
+}
+
+inline bool isArrayOrOtherSpeculation(SpeculatedType value)
+{
+    return !!(value & (SpecArray | SpecOther)) && !(value & ~(SpecArray | SpecOther));
+}
+
+inline bool isStringObjectSpeculation(SpeculatedType value)
+{
+    return value == SpecStringObject;
+}
+
+inline bool isStringOrStringObjectSpeculation(SpeculatedType value)
+{
+    return !!value && !(value & ~(SpecString | SpecStringObject));
+}
+
+inline bool isRegExpObjectSpeculation(SpeculatedType value)
+{
+    return value == SpecRegExpObject;
+}
+
+inline bool isBoolInt32Speculation(SpeculatedType value)
+{
+    return value == SpecBoolInt32;
+}
+
+inline bool isInt32Speculation(SpeculatedType value)
+{
+    return value && !(value & ~SpecInt32Only);
+}
+
+inline bool isNotInt32Speculation(SpeculatedType value)
+{
+    return value && !(value & SpecInt32Only);
+}
+
+inline bool isInt32OrBooleanSpeculation(SpeculatedType value)
+{
+    return value && !(value & ~(SpecBoolean | SpecInt32Only));
+}
+
+inline bool isInt32SpeculationForArithmetic(SpeculatedType value)
+{
+    return !(value & (SpecFullDouble | SpecInt52Only));
+}
+
+inline bool isInt32OrBooleanSpeculationForArithmetic(SpeculatedType value)
+{
+    return !(value & (SpecFullDouble | SpecInt52Only));
+}
+
+inline bool isInt32OrBooleanSpeculationExpectingDefined(SpeculatedType value)
+{
+    return isInt32OrBooleanSpeculation(value & ~SpecOther);
+}
+
+inline bool isInt52Speculation(SpeculatedType value)
+{
+    return value == SpecInt52Only;
+}
+
+inline bool isAnyIntSpeculation(SpeculatedType value)
+{
+    return !!value && (value & SpecAnyInt) == value;
+}
+
+inline bool isAnyIntAsDoubleSpeculation(SpeculatedType value)
+{
+    return value == SpecAnyIntAsDouble;
+}
+
+inline bool isDoubleRealSpeculation(SpeculatedType value)
+{
+    return !!value && (value & SpecDoubleReal) == value;
+}
+
+inline bool isDoubleSpeculation(SpeculatedType value)
+{
+    return !!value && (value & SpecFullDouble) == value;
+}
+
+inline bool isDoubleSpeculationForArithmetic(SpeculatedType value)
+{
+    return !!(value & SpecFullDouble);
+}
+
+inline bool isBytecodeRealNumberSpeculation(SpeculatedType value)
+{
+    return !!(value & SpecBytecodeRealNumber) && !(value & ~SpecBytecodeRealNumber);
+}
+
+inline bool isFullRealNumberSpeculation(SpeculatedType value)
+{
+    return !!(value & SpecFullRealNumber) && !(value & ~SpecFullRealNumber);
+}
+
+inline bool isBytecodeNumberSpeculation(SpeculatedType value)
+{
+    return !!(value & SpecBytecodeNumber) && !(value & ~SpecBytecodeNumber);
+}
+
+inline bool isFullNumberSpeculation(SpeculatedType value)
+{
+    return !!(value & SpecFullNumber) && !(value & ~SpecFullNumber);
+}
+
+inline bool isFullNumberOrBooleanSpeculation(SpeculatedType value)
+{
+    return value && !(value & ~(SpecFullNumber | SpecBoolean));
+}
+
+inline bool isFullNumberOrBooleanSpeculationExpectingDefined(SpeculatedType value)
+{
+    return isFullNumberOrBooleanSpeculation(value & ~SpecOther);
+}
+
+inline bool isBooleanSpeculation(SpeculatedType value)
+{
+    return value == SpecBoolean;
+}
+
+inline bool isNotBooleanSpeculation(SpeculatedType value)
+{
+    return value && !(value & SpecBoolean);
+}
+
+inline bool isOtherSpeculation(SpeculatedType value)
+{
+    return value == SpecOther;
+}
+
+inline bool isMiscSpeculation(SpeculatedType value)
+{
+    return !!value && !(value & ~SpecMisc);
+}
+
+inline bool isOtherOrEmptySpeculation(SpeculatedType value)
+{
+    return !value || value == SpecOther;
+}
+
+inline bool isEmptySpeculation(SpeculatedType value)
+{
+    return value == SpecEmpty;
+}
+
+inline bool isUntypedSpeculationForArithmetic(SpeculatedType value)
+{
+    return !!(value & ~(SpecFullNumber | SpecBoolean));
+}
+
+inline bool isUntypedSpeculationForBitOps(SpeculatedType value)
+{
+    return !!(value & ~(SpecFullNumber | SpecBoolean | SpecOther));
+}
+
+void dumpSpeculation(PrintStream&, SpeculatedType);
+void dumpSpeculationAbbreviated(PrintStream&, SpeculatedType);
+
+MAKE_PRINT_ADAPTOR(SpeculationDump, SpeculatedType, dumpSpeculation);
+MAKE_PRINT_ADAPTOR(AbbreviatedSpeculationDump, SpeculatedType, dumpSpeculationAbbreviated);
+
+// Merge two predictions. Note that currently this just does left | right. It may
+// seem tempting to do so directly, but you would be doing so at your own peril,
+// since the merging protocol for SpeculatedType may change at any time (and has already
+// changed several times in its history).
+inline SpeculatedType mergeSpeculations(SpeculatedType left, SpeculatedType right)
+{
+    return left | right;
+}
+
+template<typename T>
+inline bool mergeSpeculation(T& left, SpeculatedType right)
+{
+    SpeculatedType newSpeculation = static_cast<T>(mergeSpeculations(static_cast<SpeculatedType>(left), right));
+    bool result = newSpeculation != static_cast<SpeculatedType>(left);
+    left = newSpeculation;
+    return result;
+}
+
+inline bool speculationChecked(SpeculatedType actual, SpeculatedType desired)
+{
+    return (actual | desired) == desired;
+}
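+
+// For example, speculationChecked(SpecBoolInt32, SpecInt32Only) is true (a 0/1 int is still an
+// Int32), while speculationChecked(SpecInt32Only, SpecBoolInt32) is false. A typical profiling
+// site starts a prediction at SpecNone and widens it with
+// mergeSpeculation(prediction, speculationFromValue(observedValue)).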
+
+SpeculatedType speculationFromClassInfo(const ClassInfo*);
+SpeculatedType speculationFromStructure(Structure*);
+SpeculatedType speculationFromCell(JSCell*);
+SpeculatedType speculationFromValue(JSValue);
+SpeculatedType speculationFromJSType(JSType);
+
+SpeculatedType speculationFromTypedArrayType(TypedArrayType); // only valid for typed views.
+TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType);
+
+SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType);
+
+bool valuesCouldBeEqual(SpeculatedType, SpeculatedType);
+
+// Precise computation of the type of the result of a double computation after we
+// already know that the inputs are doubles and that the result must be a double. Use
+// the closest one of these that applies.
+SpeculatedType typeOfDoubleSum(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleDifference(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleProduct(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleQuotient(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleMinMax(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleNegation(SpeculatedType);
+SpeculatedType typeOfDoubleAbs(SpeculatedType);
+SpeculatedType typeOfDoubleRounding(SpeculatedType);
+SpeculatedType typeOfDoublePow(SpeculatedType, SpeculatedType);
+
+// This conservatively models the behavior of arbitrary double operations.
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType, SpeculatedType);
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType);
+
+} // namespace JSC
diff --git a/bytecode/StructureSet.cpp b/bytecode/StructureSet.cpp
new file mode 100644
index 0000000..2ccb8f0
--- /dev/null
+++ b/bytecode/StructureSet.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "StructureSet.h"
+
+#include "TrackedReferences.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+void StructureSet::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    CommaPrinter comma;
+    out.print("[");
+    forEach([&] (Structure* structure) { out.print(comma, inContext(*structure, context)); });
+    out.print("]");
+}
+
+void StructureSet::dump(PrintStream& out) const
+{
+    dumpInContext(out, nullptr);
+}
+
+} // namespace JSC
+
diff --git a/bytecode/StructureSet.h b/bytecode/StructureSet.h
new file mode 100644
index 0000000..8654ca5
--- /dev/null
+++ b/bytecode/StructureSet.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ArrayProfile.h"
+#include "DumpContext.h"
+#include "SpeculatedType.h"
+#include "Structure.h"
+#include <wtf/TinyPtrSet.h>
+
+namespace JSC {
+
+class TrackedReferences;
+
+class StructureSet : public TinyPtrSet<Structure*> {
+public:
+    // I really want to do this:
+    // using TinyPtrSet::TinyPtrSet;
+    //
+    // But I can't because Windows.
+    
+    StructureSet()
+    {
+    }
+    
+    StructureSet(Structure* structure)
+        : TinyPtrSet(structure)
+    {
+    }
+    
+    ALWAYS_INLINE StructureSet(const StructureSet& other)
+        : TinyPtrSet(other)
+    {
+    }
+    
+    Structure* onlyStructure() const
+    {
+        return onlyEntry();
+    }
+    
+    void dumpInContext(PrintStream&, DumpContext*) const;
+    void dump(PrintStream&) const;
+};
+
+} // namespace JSC
diff --git a/bytecode/StructureStubClearingWatchpoint.cpp b/bytecode/StructureStubClearingWatchpoint.cpp
new file mode 100644
index 0000000..f27e507
--- /dev/null
+++ b/bytecode/StructureStubClearingWatchpoint.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2012, 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "StructureStubClearingWatchpoint.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint()
+{
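+    // Unlink the chain iteratively; destroying a long chain of unique_ptrs recursively
+    // could overflow the stack.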
+    for (auto current = WTFMove(m_next); current; current = WTFMove(current->m_next)) { }
+}
+
+StructureStubClearingWatchpoint* StructureStubClearingWatchpoint::push(
+    const ObjectPropertyCondition& key,
+    WatchpointsOnStructureStubInfo& holder,
+    std::unique_ptr<StructureStubClearingWatchpoint>& head)
+{
+    head = std::make_unique<StructureStubClearingWatchpoint>(key, holder, WTFMove(head));
+    return head.get();
+}
+
+void StructureStubClearingWatchpoint::fireInternal(const FireDetail&)
+{
+    if (!m_key || !m_key.isWatchable(PropertyCondition::EnsureWatchability)) {
+        // This will implicitly cause my own demise: stub reset removes all watchpoints.
+        // That works, because deleting a watchpoint removes it from the set's list, and
+        // the set's list traversal for firing is robust against the set changing.
+        ConcurrentJSLocker locker(m_holder.codeBlock()->m_lock);
+        m_holder.stubInfo()->reset(m_holder.codeBlock());
+        return;
+    }
+
+    if (m_key.kind() == PropertyCondition::Presence) {
+        // If this was a presence condition, let's watch the property for replacements. This is profitable
+        // for the DFG, which will want the replacement set to be valid in order to do constant folding.
+        VM& vm = *Heap::heap(m_key.object())->vm();
+        m_key.object()->structure()->startWatchingPropertyForReplacements(vm, m_key.offset());
+    }
+
+    m_key.object()->structure()->addTransitionWatchpoint(this);
+}
+
+WatchpointsOnStructureStubInfo::~WatchpointsOnStructureStubInfo()
+{
+}
+
+StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint(const ObjectPropertyCondition& key)
+{
+    return StructureStubClearingWatchpoint::push(key, *this, m_head);
+}
+
+StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
+    std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock,
+    StructureStubInfo* stubInfo, const ObjectPropertyCondition& key)
+{
+    if (!holderRef)
+        holderRef = std::make_unique<WatchpointsOnStructureStubInfo>(codeBlock, stubInfo);
+    else {
+        ASSERT(holderRef->m_codeBlock == codeBlock);
+        ASSERT(holderRef->m_stubInfo == stubInfo);
+    }
+    
+    return holderRef->addWatchpoint(key);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/bytecode/StructureStubClearingWatchpoint.h b/bytecode/StructureStubClearingWatchpoint.h
new file mode 100644
index 0000000..665c56a
--- /dev/null
+++ b/bytecode/StructureStubClearingWatchpoint.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "ObjectPropertyCondition.h"
+#include "Watchpoint.h"
+
+#if ENABLE(JIT)
+
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class CodeBlock;
+class StructureStubInfo;
+class WatchpointsOnStructureStubInfo;
+
+class StructureStubClearingWatchpoint : public Watchpoint {
+    WTF_MAKE_NONCOPYABLE(StructureStubClearingWatchpoint);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    StructureStubClearingWatchpoint(
+        const ObjectPropertyCondition& key,
+        WatchpointsOnStructureStubInfo& holder,
+        std::unique_ptr<StructureStubClearingWatchpoint> next)
+        : m_key(key)
+        , m_holder(holder)
+        , m_next(WTFMove(next))
+    {
+    }
+    
+    virtual ~StructureStubClearingWatchpoint();
+    
+    static StructureStubClearingWatchpoint* push(
+        const ObjectPropertyCondition& key,
+        WatchpointsOnStructureStubInfo& holder,
+        std::unique_ptr<StructureStubClearingWatchpoint>& head);
+
+protected:
+    void fireInternal(const FireDetail&) override;
+
+private:
+    ObjectPropertyCondition m_key;
+    WatchpointsOnStructureStubInfo& m_holder;
+    std::unique_ptr<StructureStubClearingWatchpoint> m_next;
+};
+
+class WatchpointsOnStructureStubInfo {
+    WTF_MAKE_NONCOPYABLE(WatchpointsOnStructureStubInfo);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo)
+        : m_codeBlock(codeBlock)
+        , m_stubInfo(stubInfo)
+    {
+    }
+    
+    ~WatchpointsOnStructureStubInfo();
+    
+    StructureStubClearingWatchpoint* addWatchpoint(const ObjectPropertyCondition& key);
+    
+    static StructureStubClearingWatchpoint* ensureReferenceAndAddWatchpoint(
+        std::unique_ptr<WatchpointsOnStructureStubInfo>& holderRef,
+        CodeBlock*, StructureStubInfo*, const ObjectPropertyCondition& key);
+    
+    CodeBlock* codeBlock() const { return m_codeBlock; }
+    StructureStubInfo* stubInfo() const { return m_stubInfo; }
+    
+private:
+    CodeBlock* m_codeBlock;
+    StructureStubInfo* m_stubInfo;
+    std::unique_ptr<WatchpointsOnStructureStubInfo> m_head;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/bytecode/StructureStubInfo.cpp b/bytecode/StructureStubInfo.cpp
new file mode 100644
index 0000000..38e5cef
--- /dev/null
+++ b/bytecode/StructureStubInfo.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2008, 2014-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "StructureStubInfo.h"
+
+#include "JSObject.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
+#include "Repatch.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+
+static const bool verbose = false;
+
+StructureStubInfo::StructureStubInfo(AccessType accessType)
+    : callSiteIndex(UINT_MAX)
+    , accessType(accessType)
+    , cacheType(CacheType::Unset)
+    , countdown(1) // For a totally clear stub, we'll patch it after the first execution.
+    , repatchCount(0)
+    , numberOfCoolDowns(0)
+    , bufferingCountdown(Options::repatchBufferingCountdown())
+    , resetByGC(false)
+    , tookSlowPath(false)
+    , everConsidered(false)
+{
+}
+
+StructureStubInfo::~StructureStubInfo()
+{
+}
+
+void StructureStubInfo::initGetByIdSelf(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+    cacheType = CacheType::GetByIdSelf;
+    
+    u.byIdSelf.baseObjectStructure.set(
+        *codeBlock->vm(), codeBlock, baseObjectStructure);
+    u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initArrayLength()
+{
+    cacheType = CacheType::ArrayLength;
+}
+
+void StructureStubInfo::initPutByIdReplace(CodeBlock* codeBlock, Structure* baseObjectStructure, PropertyOffset offset)
+{
+    cacheType = CacheType::PutByIdReplace;
+    
+    u.byIdSelf.baseObjectStructure.set(
+        *codeBlock->vm(), codeBlock, baseObjectStructure);
+    u.byIdSelf.offset = offset;
+}
+
+void StructureStubInfo::initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess> stub)
+{
+    cacheType = CacheType::Stub;
+    u.stub = stub.release();
+}
+
+void StructureStubInfo::deref()
+{
+    switch (cacheType) {
+    case CacheType::Stub:
+        delete u.stub;
+        return;
+    case CacheType::Unset:
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+    case CacheType::ArrayLength:
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void StructureStubInfo::aboutToDie()
+{
+    switch (cacheType) {
+    case CacheType::Stub:
+        u.stub->aboutToDie();
+        return;
+    case CacheType::Unset:
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+    case CacheType::ArrayLength:
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+AccessGenerationResult StructureStubInfo::addAccessCase(
+    CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase)
+{
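+    // Summary of the flow below: if a PolymorphicAccess stub already exists, buffer the new case
+    // into it; otherwise create one, seeded with whatever case the current monomorphic cache
+    // represents. Machine code is regenerated only once bufferingCountdown reaches zero.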
+    VM& vm = *codeBlock->vm();
+    
+    if (verbose)
+        dataLog("Adding access case: ", accessCase, "\n");
+    
+    if (!accessCase)
+        return AccessGenerationResult::GaveUp;
+    
+    AccessGenerationResult result;
+    
+    if (cacheType == CacheType::Stub) {
+        result = u.stub->addCase(vm, codeBlock, *this, ident, WTFMove(accessCase));
+        
+        if (verbose)
+            dataLog("Had stub, result: ", result, "\n");
+
+        if (!result.buffered()) {
+            bufferedStructures.clear();
+            return result;
+        }
+    } else {
+        std::unique_ptr<PolymorphicAccess> access = std::make_unique<PolymorphicAccess>();
+        
+        Vector<std::unique_ptr<AccessCase>, 2> accessCases;
+        
+        std::unique_ptr<AccessCase> previousCase =
+            AccessCase::fromStructureStubInfo(vm, codeBlock, *this);
+        if (previousCase)
+            accessCases.append(WTFMove(previousCase));
+        
+        accessCases.append(WTFMove(accessCase));
+        
+        result = access->addCases(vm, codeBlock, *this, ident, WTFMove(accessCases));
+        
+        if (verbose)
+            dataLog("Created stub, result: ", result, "\n");
+
+        if (!result.buffered()) {
+            bufferedStructures.clear();
+            return result;
+        }
+        
+        initStub(codeBlock, WTFMove(access));
+    }
+    
+    RELEASE_ASSERT(!result.generatedSomeCode());
+    
+    // If we didn't buffer any cases then bail. If this made no changes then we'll just try again
+    // subject to cool-down.
+    if (!result.buffered()) {
+        if (verbose)
+            dataLog("Didn't buffer anything, bailing.\n");
+        bufferedStructures.clear();
+        return result;
+    }
+    
+    // The buffering countdown tells us if we should be repatching now.
+    if (bufferingCountdown) {
+        if (verbose)
+            dataLog("Countdown is too high: ", bufferingCountdown, ".\n");
+        return result;
+    }
+    
+    // Forget the buffered structures so that all future attempts to cache get fully handled by the
+    // PolymorphicAccess.
+    bufferedStructures.clear();
+    
+    result = u.stub->regenerate(vm, codeBlock, *this, ident);
+    
+    if (verbose)
+        dataLog("Regeneration result: ", result, "\n");
+    
+    RELEASE_ASSERT(!result.buffered());
+    
+    if (!result.generatedSomeCode())
+        return result;
+    
+    // If we generated some code then we don't want to attempt to repatch in the future until we
+    // gather enough cases.
+    bufferingCountdown = Options::repatchBufferingCountdown();
+    return result;
+}
+
+void StructureStubInfo::reset(CodeBlock* codeBlock)
+{
+    bufferedStructures.clear();
+    
+    if (cacheType == CacheType::Unset)
+        return;
+    
+    if (Options::verboseOSR()) {
+        // This can be called from GC destructor calls, so we don't try to do a full dump
+        // of the CodeBlock.
+        dataLog("Clearing structure cache (kind ", static_cast(accessType), ") in ", RawPointer(codeBlock), ".\n");
+    }
+
+    switch (accessType) {
+    case AccessType::GetPure:
+        resetGetByID(codeBlock, *this, GetByIDKind::Pure);
+        break;
+    case AccessType::Get:
+        resetGetByID(codeBlock, *this, GetByIDKind::Normal);
+        break;
+    case AccessType::Put:
+        resetPutByID(codeBlock, *this);
+        break;
+    case AccessType::In:
+        resetIn(codeBlock, *this);
+        break;
+    }
+    
+    deref();
+    cacheType = CacheType::Unset;
+}
+
+void StructureStubInfo::visitWeakReferences(CodeBlock* codeBlock)
+{
+    VM& vm = *codeBlock->vm();
+    
+    bufferedStructures.genericFilter(
+        [&] (Structure* structure) -> bool {
+            return Heap::isMarked(structure);
+        });
+
+    switch (cacheType) {
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+        if (Heap::isMarked(u.byIdSelf.baseObjectStructure.get()))
+            return;
+        break;
+    case CacheType::Stub:
+        if (u.stub->visitWeak(vm))
+            return;
+        break;
+    default:
+        return;
+    }
+
+    reset(codeBlock);
+    resetByGC = true;
+}
+
+bool StructureStubInfo::propagateTransitions(SlotVisitor& visitor)
+{
+    switch (cacheType) {
+    case CacheType::Unset:
+    case CacheType::ArrayLength:
+        return true;
+    case CacheType::GetByIdSelf:
+    case CacheType::PutByIdReplace:
+        return u.byIdSelf.baseObjectStructure->markIfCheap(visitor);
+    case CacheType::Stub:
+        return u.stub->propagateTransitions(visitor);
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+    return true;
+}
+
+bool StructureStubInfo::containsPC(void* pc) const
+{
+    if (cacheType != CacheType::Stub)
+        return false;
+    return u.stub->containsPC(pc);
+}
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
diff --git a/bytecode/StructureStubInfo.h b/bytecode/StructureStubInfo.h
new file mode 100644
index 0000000..39b721f
--- /dev/null
+++ b/bytecode/StructureStubInfo.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeOrigin.h"
+#include "Instruction.h"
+#include "JITStubRoutine.h"
+#include "MacroAssembler.h"
+#include "ObjectPropertyConditionSet.h"
+#include "Options.h"
+#include "RegisterSet.h"
+#include "Structure.h"
+#include "StructureSet.h"
+#include "StructureStubClearingWatchpoint.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+
+class AccessCase;
+class AccessGenerationResult;
+class PolymorphicAccess;
+
+enum class AccessType : int8_t {
+    Get,
+    GetPure,
+    Put,
+    In
+};
+
+enum class CacheType : int8_t {
+    Unset,
+    GetByIdSelf,
+    PutByIdReplace,
+    Stub,
+    ArrayLength
+};
+
+class StructureStubInfo {
+    WTF_MAKE_NONCOPYABLE(StructureStubInfo);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    StructureStubInfo(AccessType);
+    ~StructureStubInfo();
+
+    void initGetByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+    void initArrayLength();
+    void initPutByIdReplace(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
+    void initStub(CodeBlock*, std::unique_ptr<PolymorphicAccess>);
+
+    AccessGenerationResult addAccessCase(CodeBlock*, const Identifier&, std::unique_ptr<AccessCase>);
+
+    void reset(CodeBlock*);
+
+    void deref();
+    void aboutToDie();
+
+    // Check if the stub has weak references that are dead. If it does, then it resets itself,
+    // either entirely or just enough to ensure that those dead pointers don't get used anymore.
+    void visitWeakReferences(CodeBlock*);
+    
+    // This returns true if it has marked everything that it will ever mark.
+    bool propagateTransitions(SlotVisitor&);
+        
+    ALWAYS_INLINE bool considerCaching(CodeBlock* codeBlock, Structure* structure)
+    {
+        // We never cache non-cells.
+        if (!structure)
+            return false;
+        
+        // This method is called from the Optimize variants of IC slow paths. The first part of this
+        // method tries to determine if the Optimize variant should really behave like the
+        // non-Optimize variant and leave the IC untouched.
+        //
+        // If we determine that we should do something to the IC then the next order of business is
+        // to determine if this Structure would impact the IC at all. We know that it won't, if we
+        // have already buffered something on its behalf. That's what the bufferedStructures set is
+        // for.
+        
+        everConsidered = true;
+        if (!countdown) {
+            // Check if we have been doing repatching too frequently. If so, then we should cool off
+            // for a while.
+            WTF::incrementWithSaturation(repatchCount);
+            if (repatchCount > Options::repatchCountForCoolDown()) {
+                // We've been repatching too much, so don't do it now.
+                repatchCount = 0;
+                // The amount of time we require for cool-down depends on the number of times we've
+                // had to cool down in the past. The relationship is exponential. The max value we
+                // allow here is 2^8 - 2 (i.e. 254), since the slow paths may increment the count to indicate
+                // that they'd like to temporarily skip patching just this once.
+                countdown = WTF::leftShiftWithSaturation(
+                    static_cast<uint8_t>(Options::initialCoolDownCount()),
+                    numberOfCoolDowns,
+                    static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() - 1));
+                WTF::incrementWithSaturation(numberOfCoolDowns);
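+                // For example, if Options::initialCoolDownCount() were 20, successive cool-downs
+                // would last 20, 40, 80, ... executions, saturating at 254.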
+                
+                // We may still have had something buffered. Trigger generation now.
+                bufferingCountdown = 0;
+                return true;
+            }
+            
+            // We don't want to return false due to buffering indefinitely.
+            if (!bufferingCountdown) {
+                // Note that when this returns true, it's possible that we will not even get an
+                // AccessCase because this may cause Repatch.cpp to simply do an in-place
+                // repatching.
+                return true;
+            }
+            
+            bufferingCountdown--;
+            
+            // Now protect the IC buffering. We want to proceed only if this is a structure that
+            // we don't already have a case buffered for. Note that if this returns true but the
+            // bufferingCountdown is not zero then we will buffer the access case for later without
+            // immediately generating code for it.
+            bool isNewlyAdded = bufferedStructures.add(structure);
+            if (isNewlyAdded) {
+                VM& vm = *codeBlock->vm();
+                vm.heap.writeBarrier(codeBlock);
+            }
+            return isNewlyAdded;
+        }
+        countdown--;
+        return false;
+    }
+
+    bool containsPC(void* pc) const;
+
+    CodeOrigin codeOrigin;
+    CallSiteIndex callSiteIndex;
+
+    union {
+        struct {
+            WriteBarrierBase<Structure> baseObjectStructure;
+            PropertyOffset offset;
+        } byIdSelf;
+        PolymorphicAccess* stub;
+    } u;
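+    // Which union member is live is dictated by cacheType: byIdSelf for CacheType::GetByIdSelf
+    // and CacheType::PutByIdReplace, stub for CacheType::Stub.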
+    
+    // Represents those structures that already have buffered AccessCases in the PolymorphicAccess.
+    // Note that it's always safe to clear this. If we clear it prematurely, then if we see the same
+    // structure again during this buffering countdown, we will create an AccessCase object for it.
+    // That's not so bad - we'll get rid of the redundant ones once we regenerate.
+    StructureSet bufferedStructures;
+    
+    struct {
+        CodeLocationLabel start; // This is either the start of the inline IC for *byId caches, or the location of the patchable jump for 'in' caches.
+        RegisterSet usedRegisters;
+        uint32_t inlineSize;
+        int32_t deltaFromStartToSlowPathCallLocation;
+        int32_t deltaFromStartToSlowPathStart;
+
+        int8_t baseGPR;
+        int8_t valueGPR;
+#if USE(JSVALUE32_64)
+        int8_t valueTagGPR;
+        int8_t baseTagGPR;
+#endif
+    } patch;
+
+    CodeLocationCall slowPathCallLocation() { return patch.start.callAtOffset(patch.deltaFromStartToSlowPathCallLocation); }
+    CodeLocationLabel doneLocation() { return patch.start.labelAtOffset(patch.inlineSize); }
+    CodeLocationLabel slowPathStartLocation() { return patch.start.labelAtOffset(patch.deltaFromStartToSlowPathStart); }
+    CodeLocationJump patchableJumpForIn()
+    { 
+        ASSERT(accessType == AccessType::In);
+        return patch.start.jumpAtOffset(0);
+    }
+
+    JSValueRegs valueRegs() const
+    {
+        return JSValueRegs(
+#if USE(JSVALUE32_64)
+            static_cast<GPRReg>(patch.valueTagGPR),
+#endif
+            static_cast<GPRReg>(patch.valueGPR));
+    }
+
+
+    AccessType accessType;
+    CacheType cacheType;
+    uint8_t countdown; // We repatch only when this is zero. If not zero, we decrement.
+    uint8_t repatchCount;
+    uint8_t numberOfCoolDowns;
+    uint8_t bufferingCountdown;
+    bool resetByGC : 1;
+    bool tookSlowPath : 1;
+    bool everConsidered : 1;
+};
+
+inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo)
+{
+    return structureStubInfo.codeOrigin;
+}
+
+#else
+
+class StructureStubInfo;
+
+#endif // ENABLE(JIT)
+
+typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;
+
+} // namespace JSC
diff --git a/bytecode/SuperSampler.cpp b/bytecode/SuperSampler.cpp
new file mode 100644
index 0000000..a4e21f9
--- /dev/null
+++ b/bytecode/SuperSampler.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "SuperSampler.h"
+
+#include "MacroAssembler.h"
+#include "Options.h"
+#include 
+#include 
+#include 
+#include 
+
+namespace JSC {
+
+volatile uint32_t g_superSamplerCount;
+
+static StaticLock lock;
+static double in;
+static double out;
+
+void initializeSuperSampler()
+{
+    if (!Options::useSuperSampler())
+        return;
+
+    createThread(
+        "JSC Super Sampler",
+        [] () {
+            const int sleepQuantum = 10;
+            const int printingPeriod = 1000;
+            for (;;) {
+                for (int ms = 0; ms < printingPeriod; ms += sleepQuantum) {
+                    {
+                        LockHolder locker(lock);
+                        if (g_superSamplerCount)
+                            in++;
+                        else
+                            out++;
+                    }
+                    sleepMS(sleepQuantum);
+                }
+                printSuperSamplerState();
+                if (static_cast<int32_t>(g_superSamplerCount) < 0)
+                    dataLog("WARNING: Super sampler undercount detected!\n");
+            }
+        });
+}
+
+void resetSuperSamplerState()
+{
+    LockHolder locker(lock);
+    in = 0;
+    out = 0;
+}
+
+void printSuperSamplerState()
+{
+    if (!Options::useSuperSampler())
+        return;
+
+    LockHolder locker(lock);
+    double percentage = 100.0 * in / (in + out);
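+    // percentage is NaN when nothing has been sampled yet (in + out == 0); report that as 0 below.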
+    if (percentage != percentage)
+        percentage = 0.0;
+    dataLog("Percent time behind super sampler flag: ", percentage, "\n");
+}
+
+} // namespace JSC
+
diff --git a/bytecode/SuperSampler.h b/bytecode/SuperSampler.h
new file mode 100644
index 0000000..c90f6d4
--- /dev/null
+++ b/bytecode/SuperSampler.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+namespace JSC {
+
+class MacroAssembler;
+
+extern volatile uint32_t g_superSamplerCount;
+
+void initializeSuperSampler();
+
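+// RAII helper: while any SuperSamplerScope is alive, g_superSamplerCount is non-zero and the
+// sampler thread attributes the elapsed time to the "behind the super sampler flag" bucket.
+// A minimal usage sketch:
+//
+//     {
+//         SuperSamplerScope scope; // or SuperSamplerScope scope(shouldSample)
+//         // ... region whose time should be attributed ...
+//     }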
+class SuperSamplerScope {
+public:
+    SuperSamplerScope(bool doSample = true)
+        : m_doSample(doSample)
+    {
+        if (m_doSample)
+            g_superSamplerCount++;
+    }
+
+    ~SuperSamplerScope()
+    {
+        if (m_doSample)
+            g_superSamplerCount--;
+    }
+
+private:
+    bool m_doSample;
+};
+
+JS_EXPORT_PRIVATE void resetSuperSamplerState();
+JS_EXPORT_PRIVATE void printSuperSamplerState();
+
+} // namespace JSC
diff --git a/bytecode/ToThisStatus.cpp b/bytecode/ToThisStatus.cpp
new file mode 100644
index 0000000..23d1e08
--- /dev/null
+++ b/bytecode/ToThisStatus.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ToThisStatus.h"
+
+namespace JSC {
+
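+// ToThisOK acts as the identity of this merge, ToThisConflicted absorbs everything, and
+// ToThisClearedByGC wins over OK but loses to Conflicted.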
+ToThisStatus merge(ToThisStatus a, ToThisStatus b)
+{
+    switch (a) {
+    case ToThisOK:
+        return b;
+    case ToThisConflicted:
+        return ToThisConflicted;
+    case ToThisClearedByGC:
+        if (b == ToThisConflicted)
+            return ToThisConflicted;
+        return ToThisClearedByGC;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+    return ToThisConflicted;
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ToThisStatus status)
+{
+    switch (status) {
+    case ToThisOK:
+        out.print("OK");
+        return;
+    case ToThisConflicted:
+        out.print("Conflicted");
+        return;
+    case ToThisClearedByGC:
+        out.print("ClearedByGC");
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
diff --git a/bytecode/ToThisStatus.h b/bytecode/ToThisStatus.h
new file mode 100644
index 0000000..ded012a
--- /dev/null
+++ b/bytecode/ToThisStatus.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include 
+
+namespace JSC {
+
+enum ToThisStatus {
+    ToThisOK,
+    ToThisConflicted,
+    ToThisClearedByGC
+};
+
+ToThisStatus merge(ToThisStatus, ToThisStatus);
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::ToThisStatus);
+
+} // namespace WTF
diff --git a/bytecode/TrackedReferences.cpp b/bytecode/TrackedReferences.cpp
new file mode 100644
index 0000000..ae213d5
--- /dev/null
+++ b/bytecode/TrackedReferences.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "TrackedReferences.h"
+
+#include "JSCInlines.h"
+#include 
+
+namespace JSC {
+
+TrackedReferences::TrackedReferences()
+{
+}
+
+TrackedReferences::~TrackedReferences()
+{
+}
+
+void TrackedReferences::add(JSCell* cell)
+{
+    if (cell)
+        m_references.add(cell);
+}
+
+void TrackedReferences::add(JSValue value)
+{
+    if (value.isCell())
+        add(value.asCell());
+}
+
+void TrackedReferences::check(JSCell* cell) const
+{
+    if (!cell)
+        return;
+    
+    if (m_references.contains(cell))
+        return;
+    
+    dataLog("Found untracked reference: ", JSValue(cell), "\n");
+    dataLog("All tracked references: ", *this, "\n");
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void TrackedReferences::check(JSValue value) const
+{
+    if (value.isCell())
+        check(value.asCell());
+}
+
+void TrackedReferences::dump(PrintStream& out) const
+{
+    CommaPrinter comma;
+    for (JSCell* cell : m_references)
+        out.print(comma, RawPointer(cell));
+}
+
+} // namespace JSC
+
diff --git a/bytecode/TrackedReferences.h b/bytecode/TrackedReferences.h
new file mode 100644
index 0000000..a102167
--- /dev/null
+++ b/bytecode/TrackedReferences.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "JSCJSValue.h"
+#include "JSCell.h"
+#include 
+#include 
+
+namespace JSC {
+
+class TrackedReferences {
+public:
+    TrackedReferences();
+    ~TrackedReferences();
+    
+    void add(JSCell*);
+    void add(JSValue);
+    
+    void check(JSCell*) const;
+    void check(JSValue) const;
+    
+    void dump(PrintStream&) const;
+    
+private:
+    HashSet<JSCell*> m_references;
+};
+
+} // namespace JSC
diff --git a/bytecode/TypeLocation.h b/bytecode/TypeLocation.h
new file mode 100644
index 0000000..bc75923
--- /dev/null
+++ b/bytecode/TypeLocation.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "TypeSet.h"
+
+namespace JSC {
+
+enum TypeProfilerGlobalIDFlags {
+    TypeProfilerNeedsUniqueIDGeneration = -1,
+    TypeProfilerNoGlobalIDExists = -2,
+    TypeProfilerReturnStatement = -3
+};
+
+typedef intptr_t GlobalVariableID;
+
+class TypeLocation {
+public:
+    TypeLocation()
+        : m_lastSeenType(TypeNothing)
+        , m_divotForFunctionOffsetIfReturnStatement(UINT_MAX)
+        , m_instructionTypeSet(TypeSet::create())
+        , m_globalTypeSet(nullptr)
+    {
+    }
+
+    GlobalVariableID m_globalVariableID;
+    RuntimeType m_lastSeenType;
+    intptr_t m_sourceID;
+    unsigned m_divotStart;
+    unsigned m_divotEnd;
+    unsigned m_divotForFunctionOffsetIfReturnStatement;
+    RefPtr<TypeSet> m_instructionTypeSet;
+    RefPtr<TypeSet> m_globalTypeSet;
+};
+
+} // namespace JSC
diff --git a/bytecode/UnlinkedCodeBlock.cpp b/bytecode/UnlinkedCodeBlock.cpp
new file mode 100644
index 0000000..53defbf
--- /dev/null
+++ b/bytecode/UnlinkedCodeBlock.cpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "UnlinkedCodeBlock.h"
+
+#include "BytecodeGenerator.h"
+#include "BytecodeRewriter.h"
+#include "ClassInfo.h"
+#include "CodeCache.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
+#include "JSString.h"
+#include "Parser.h"
+#include "PreciseJumpTargetsInlines.h"
+#include "SourceProvider.h"
+#include "Structure.h"
+#include "SymbolTable.h"
+#include "UnlinkedEvalCodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
+#include "UnlinkedInstructionStream.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
+#include 
+
+namespace JSC {
+
+const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) };
+
+UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    : Base(*vm, structure)
+    , m_numVars(0)
+    , m_numCalleeLocals(0)
+    , m_numParameters(0)
+    , m_globalObjectRegister(VirtualRegister())
+    , m_usesEval(info.usesEval())
+    , m_isStrictMode(info.isStrictMode())
+    , m_isConstructor(info.isConstructor())
+    , m_hasCapturedVariables(false)
+    , m_isBuiltinFunction(info.isBuiltinFunction())
+    , m_superBinding(static_cast<unsigned>(info.superBinding()))
+    , m_scriptMode(static_cast<unsigned>(info.scriptMode()))
+    , m_isArrowFunctionContext(info.isArrowFunctionContext())
+    , m_isClassContext(info.isClassContext())
+    , m_wasCompiledWithDebuggingOpcodes(debuggerMode == DebuggerMode::DebuggerOn || Options::forceDebuggerBytecodeGeneration())
+    , m_constructorKind(static_cast<unsigned>(info.constructorKind()))
+    , m_derivedContextType(static_cast<unsigned>(info.derivedContextType()))
+    , m_evalContextType(static_cast<unsigned>(info.evalContextType()))
+    , m_lineCount(0)
+    , m_endColumn(UINT_MAX)
+    , m_didOptimize(MixedTriState)
+    , m_parseMode(info.parseMode())
+    , m_features(0)
+    , m_codeType(codeType)
+    , m_arrayProfileCount(0)
+    , m_arrayAllocationProfileCount(0)
+    , m_objectAllocationProfileCount(0)
+    , m_valueProfileCount(0)
+    , m_llintCallLinkInfoCount(0)
+{
+    for (auto& constantRegisterIndex : m_linkTimeConstants)
+        constantRegisterIndex = 0;
+    ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind()));
+}
+
+void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+    auto locker = holdLock(*thisObject);
+    for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr)
+        visitor.append(*ptr);
+    for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr)
+        visitor.append(*ptr);
+    visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size());
+    if (thisObject->m_unlinkedInstructions)
+        visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes());
+    if (thisObject->m_rareData) {
+        for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++)
+            visitor.append(thisObject->m_rareData->m_regexps[i]);
+    }
+}
+
+size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell)
+{
+    UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell);
+    size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0;
+    return Base::estimatedSize(cell) + extraSize;
+}
+
+int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+    ASSERT(bytecodeOffset < instructions().count());
+    int divot;
+    int startOffset;
+    int endOffset;
+    unsigned line;
+    unsigned column;
+    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
+    return line;
+}
+
+inline void UnlinkedCodeBlock::getLineAndColumn(const ExpressionRangeInfo& info,
+    unsigned& line, unsigned& column) const
+{
+    switch (info.mode) {
+    case ExpressionRangeInfo::FatLineMode:
+        info.decodeFatLineMode(line, column);
+        break;
+    case ExpressionRangeInfo::FatColumnMode:
+        info.decodeFatColumnMode(line, column);
+        break;
+    case ExpressionRangeInfo::FatLineAndColumnMode: {
+        unsigned fatIndex = info.position;
+        ExpressionRangeInfo::FatPosition& fatPos = m_rareData->m_expressionInfoFatPositions[fatIndex];
+        line = fatPos.line;
+        column = fatPos.column;
+        break;
+    }
+    } // switch
+}
+
+#ifndef NDEBUG
+static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column)
+{
+    const auto& instructions = instructionStream.unpackForDebugging();
+    OpcodeID opcode = instructions[instructionOffset].u.opcode;
+    const char* event = "";
+    if (opcode == op_debug) {
+        switch (instructions[instructionOffset + 1].u.operand) {
+        case WillExecuteProgram: event = " WillExecuteProgram"; break;
+        case DidExecuteProgram: event = " DidExecuteProgram"; break;
+        case DidEnterCallFrame: event = " DidEnterCallFrame"; break;
+        case DidReachBreakpoint: event = " DidReachBreakpoint"; break;
+        case WillLeaveCallFrame: event = " WillLeaveCallFrame"; break;
+        case WillExecuteStatement: event = " WillExecuteStatement"; break;
+        case WillExecuteExpression: event = " WillExecuteExpression"; break;
+        }
+    }
+    dataLogF("  [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event);
+}
+
+void UnlinkedCodeBlock::dumpExpressionRangeInfo()
+{
+    Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo;
+
+    size_t size = m_expressionInfo.size();
+    dataLogF("UnlinkedCodeBlock %p expressionRangeInfo[%zu] {\n", this, size);
+    for (size_t i = 0; i < size; i++) {
+        ExpressionRangeInfo& info = expressionInfo[i];
+        unsigned line;
+        unsigned column;
+        getLineAndColumn(info, line, column);
+        dumpLineColumnEntry(i, instructions(), info.instructionOffset, line, column);
+    }
+    dataLog("}\n");
+}
+#endif
+
+void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset,
+    int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
+{
+    ASSERT(bytecodeOffset < instructions().count());
+
+    if (!m_expressionInfo.size()) {
+        startOffset = 0;
+        endOffset = 0;
+        divot = 0;
+        line = 0;
+        column = 0;
+        return;
+    }
+
+    const Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo;
+
+    int low = 0;
+    int high = expressionInfo.size();
+    while (low < high) {
+        int mid = low + (high - low) / 2;
+        if (expressionInfo[mid].instructionOffset <= bytecodeOffset)
+            low = mid + 1;
+        else
+            high = mid;
+    }
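+    // The search leaves 'low' one past the last entry whose instructionOffset <= bytecodeOffset,
+    // so expressionInfo[low - 1] is the range covering this offset (clamped to the first entry).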
+
+    if (!low)
+        low = 1;
+
+    const ExpressionRangeInfo& info = expressionInfo[low - 1];
+    startOffset = info.startOffset;
+    endOffset = info.endOffset;
+    divot = info.divotPoint;
+    getLineAndColumn(info, line, column);
+}
+
+void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset,
+    int divot, int startOffset, int endOffset, unsigned line, unsigned column)
+{
+    if (divot > ExpressionRangeInfo::MaxDivot) {
+        // Overflow has occurred, we can only give line number info for errors for this region
+        divot = 0;
+        startOffset = 0;
+        endOffset = 0;
+    } else if (startOffset > ExpressionRangeInfo::MaxOffset) {
+        // If the start offset is out of bounds we clear both offsets
+        // so we only get the divot marker. Error message will have to be reduced
+        // to line and charPosition number.
+        startOffset = 0;
+        endOffset = 0;
+    } else if (endOffset > ExpressionRangeInfo::MaxOffset) {
+        // The end offset is only used for additional context, and is much more likely
+        // to overflow (eg. function call arguments) so we are willing to drop it without
+        // dropping the rest of the range.
+        endOffset = 0;
+    }
+
+    unsigned positionMode =
+        (line <= ExpressionRangeInfo::MaxFatLineModeLine && column <= ExpressionRangeInfo::MaxFatLineModeColumn) 
+        ? ExpressionRangeInfo::FatLineMode
+        : (line <= ExpressionRangeInfo::MaxFatColumnModeLine && column <= ExpressionRangeInfo::MaxFatColumnModeColumn)
+        ? ExpressionRangeInfo::FatColumnMode
+        : ExpressionRangeInfo::FatLineAndColumnMode;
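+    // FatLineMode and FatColumnMode encode the position inline (see encodeFatLineMode /
+    // encodeFatColumnMode below); FatLineAndColumnMode spills the pair into
+    // m_rareData->m_expressionInfoFatPositions and stores only an index into it.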
+
+    ExpressionRangeInfo info;
+    info.instructionOffset = instructionOffset;
+    info.divotPoint = divot;
+    info.startOffset = startOffset;
+    info.endOffset = endOffset;
+
+    info.mode = positionMode;
+    switch (positionMode) {
+    case ExpressionRangeInfo::FatLineMode:
+        info.encodeFatLineMode(line, column);
+        break;
+    case ExpressionRangeInfo::FatColumnMode:
+        info.encodeFatColumnMode(line, column);
+        break;
+    case ExpressionRangeInfo::FatLineAndColumnMode: {
+        createRareDataIfNecessary();
+        unsigned fatIndex = m_rareData->m_expressionInfoFatPositions.size();
+        ExpressionRangeInfo::FatPosition fatPos = { line, column };
+        m_rareData->m_expressionInfoFatPositions.append(fatPos);
+        info.position = fatIndex;
+    }
+    } // switch
+
+    m_expressionInfo.append(info);
+}
+
+bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot)
+{
+    static const bool verbose = false;
+    if (!m_rareData) {
+        if (verbose)
+            dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+        startDivot = UINT_MAX;
+        endDivot = UINT_MAX;
+        return false;
+    }
+
+    auto iter = m_rareData->m_typeProfilerInfoMap.find(bytecodeOffset);
+    if (iter == m_rareData->m_typeProfilerInfoMap.end()) {
+        if (verbose)
+            dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset);
+        startDivot = UINT_MAX;
+        endDivot = UINT_MAX;
+        return false;
+    }
+    
+    RareData::TypeProfilerExpressionRange& range = iter->value;
+    startDivot = range.m_startDivot;
+    endDivot = range.m_endDivot;
+    return true;
+}
+
+void UnlinkedCodeBlock::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot)
+{
+    createRareDataIfNecessary();
+    RareData::TypeProfilerExpressionRange range;
+    range.m_startDivot = startDivot;
+    range.m_endDivot = endDivot;
+    m_rareData->m_typeProfilerInfoMap.set(instructionOffset, range);
+}
+
+UnlinkedCodeBlock::~UnlinkedCodeBlock()
+{
+}
+
+void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions)
+{
+    ASSERT(instructions);
+    {
+        auto locker = holdLock(*this);
+        m_unlinkedInstructions = WTFMove(instructions);
+    }
+    Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes());
+}
+
+const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const
+{
+    ASSERT(m_unlinkedInstructions.get());
+    return *m_unlinkedInstructions;
+}
+
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
+{
+    return handlerForIndex(bytecodeOffset, requiredHandler);
+}
+
+UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
+{
+    if (!m_rareData)
+        return nullptr;
+    return UnlinkedHandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
+}
+
+void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter)
+{
+    // Before applying the changes, we adjust the jumps based on the original bytecode offset, the offset to the jump target, and
+    // the insertion information.
+
+    BytecodeGraph& graph = rewriter.graph();
+    UnlinkedInstruction* instructionsBegin = graph.instructions().begin();
+
+    for (int bytecodeOffset = 0, instructionCount = graph.instructions().size(); bytecodeOffset < instructionCount;) {
+        UnlinkedInstruction* current = instructionsBegin + bytecodeOffset;
+        OpcodeID opcodeID = current[0].u.opcode;
+        extractStoredJumpTargetsForBytecodeOffset(this, vm()->interpreter, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) {
+            relativeOffset = rewriter.adjustJumpTarget(bytecodeOffset, bytecodeOffset + relativeOffset);
+        });
+        bytecodeOffset += opcodeLength(opcodeID);
+    }
+
+    // Then, exception handlers should be adjusted.
+    if (m_rareData) {
+        for (UnlinkedHandlerInfo& handler : m_rareData->m_exceptionHandlers) {
+            handler.target = rewriter.adjustAbsoluteOffset(handler.target);
+            handler.start = rewriter.adjustAbsoluteOffset(handler.start);
+            handler.end = rewriter.adjustAbsoluteOffset(handler.end);
+        }
+
+        for (size_t i = 0; i < m_rareData->m_opProfileControlFlowBytecodeOffsets.size(); ++i)
+            m_rareData->m_opProfileControlFlowBytecodeOffsets[i] = rewriter.adjustAbsoluteOffset(m_rareData->m_opProfileControlFlowBytecodeOffsets[i]);
+
+        if (!m_rareData->m_typeProfilerInfoMap.isEmpty()) {
+            HashMap<unsigned, RareData::TypeProfilerExpressionRange> adjustedTypeProfilerInfoMap;
+            for (auto& entry : m_rareData->m_typeProfilerInfoMap)
+                adjustedTypeProfilerInfoMap.set(rewriter.adjustAbsoluteOffset(entry.key), entry.value);
+            m_rareData->m_typeProfilerInfoMap.swap(adjustedTypeProfilerInfoMap);
+        }
+    }
+
+    for (size_t i = 0; i < m_propertyAccessInstructions.size(); ++i)
+        m_propertyAccessInstructions[i] = rewriter.adjustAbsoluteOffset(m_propertyAccessInstructions[i]);
+
+    for (size_t i = 0; i < m_expressionInfo.size(); ++i)
+        m_expressionInfo[i].instructionOffset = rewriter.adjustAbsoluteOffset(m_expressionInfo[i].instructionOffset);
+
+    // Then, modify the unlinked instructions.
+    rewriter.applyModification();
+
+    // And recompute the jump target based on the modified unlinked instructions.
+    m_jumpTargets.clear();
+    recomputePreciseJumpTargets(this, graph.instructions().begin(), graph.instructions().size(), m_jumpTargets);
+}
+
+void UnlinkedCodeBlock::shrinkToFit()
+{
+    auto locker = holdLock(*this);
+    
+    m_jumpTargets.shrinkToFit();
+    m_identifiers.shrinkToFit();
+    m_bitVectors.shrinkToFit();
+    m_constantRegisters.shrinkToFit();
+    m_constantsSourceCodeRepresentation.shrinkToFit();
+    m_functionDecls.shrinkToFit();
+    m_functionExprs.shrinkToFit();
+    m_propertyAccessInstructions.shrinkToFit();
+    m_expressionInfo.shrinkToFit();
+
+    if (m_rareData) {
+        m_rareData->m_exceptionHandlers.shrinkToFit();
+        m_rareData->m_regexps.shrinkToFit();
+        m_rareData->m_constantBuffers.shrinkToFit();
+        m_rareData->m_switchJumpTables.shrinkToFit();
+        m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+        m_rareData->m_expressionInfoFatPositions.shrinkToFit();
+    }
+}
+
+} // namespace JSC
diff --git a/bytecode/UnlinkedCodeBlock.h b/bytecode/UnlinkedCodeBlock.h
new file mode 100644
index 0000000..f057497
--- /dev/null
+++ b/bytecode/UnlinkedCodeBlock.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeConventions.h"
+#include "CodeSpecializationKind.h"
+#include "CodeType.h"
+#include "ConstructAbility.h"
+#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
+#include "Identifier.h"
+#include "JSCell.h"
+#include "JSString.h"
+#include "LockDuringMarking.h"
+#include "ParserModes.h"
+#include "RegExp.h"
+#include "SpecialPointer.h"
+#include "UnlinkedFunctionExecutable.h"
+#include "VariableEnvironment.h"
+#include "VirtualRegister.h"
+#include 
+#include 
+#include 
+
+namespace JSC {
+
+class BytecodeRewriter;
+class Debugger;
+class FunctionExecutable;
+class ParserError;
+class ScriptExecutable;
+class SourceCode;
+class SourceProvider;
+class UnlinkedCodeBlock;
+class UnlinkedFunctionCodeBlock;
+class UnlinkedFunctionExecutable;
+class UnlinkedInstructionStream;
+struct ExecutableInfo;
+
+typedef unsigned UnlinkedValueProfile;
+typedef unsigned UnlinkedArrayProfile;
+typedef unsigned UnlinkedArrayAllocationProfile;
+typedef unsigned UnlinkedObjectAllocationProfile;
+typedef unsigned UnlinkedLLIntCallLinkInfo;
+
+struct UnlinkedStringJumpTable {
+    struct OffsetLocation {
+        int32_t branchOffset;
+    };
+
+    typedef HashMap<RefPtr<StringImpl>, OffsetLocation> StringOffsetTable;
+    StringOffsetTable offsetTable;
+
+    inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset)
+    {
+        StringOffsetTable::const_iterator end = offsetTable.end();
+        StringOffsetTable::const_iterator loc = offsetTable.find(value);
+        if (loc == end)
+            return defaultOffset;
+        return loc->value.branchOffset;
+    }
+
+};
+
+struct UnlinkedSimpleJumpTable {
+    Vector<int32_t> branchOffsets;
+    int32_t min;
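+    // branchOffsets is presumably indexed by the case value biased by 'min'; an entry of 0 means
+    // the default offset applies (see add() below).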
+
+    int32_t offsetForValue(int32_t value, int32_t defaultOffset);
+    void add(int32_t key, int32_t offset)
+    {
+        if (!branchOffsets[key])
+            branchOffsets[key] = offset;
+    }
+};
+
+struct UnlinkedInstruction {
+    UnlinkedInstruction() { u.operand = 0; }
+    UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; }
+    UnlinkedInstruction(int operand) { u.operand = operand; }
+    union {
+        OpcodeID opcode;
+        int32_t operand;
+        unsigned index;
+    } u;
+};
+
+class UnlinkedCodeBlock : public JSCell {
+public:
+    typedef JSCell Base;
+    static const unsigned StructureFlags = Base::StructureFlags;
+
+    static const bool needsDestruction = true;
+
+    enum { CallFunction, ApplyFunction };
+
+    typedef UnlinkedInstruction Instruction;
+    typedef Vector<UnlinkedInstruction> UnpackedInstructions;
+
+    bool isConstructor() const { return m_isConstructor; }
+    bool isStrictMode() const { return m_isStrictMode; }
+    bool usesEval() const { return m_usesEval; }
+    SourceParseMode parseMode() const { return m_parseMode; }
+    bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
+    DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
+    EvalContextType evalContextType() const { return static_cast<EvalContextType>(m_evalContextType); }
+    bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
+    bool isClassContext() const { return m_isClassContext; }
+
+    void addExpressionInfo(unsigned instructionOffset, int divot,
+        int startOffset, int endOffset, unsigned line, unsigned column);
+
+    void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot);
+
+    bool hasExpressionInfo() { return m_expressionInfo.size(); }
+    const Vector<ExpressionRangeInfo>& expressionInfo() { return m_expressionInfo; }
+
+    // Special registers
+    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
+    void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; }
+
+    bool usesGlobalObject() const { return m_globalObjectRegister.isValid(); }
+    void setGlobalObjectRegister(VirtualRegister globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; }
+    VirtualRegister globalObjectRegister() const { return m_globalObjectRegister; }
+
+    // Parameter information
+    void setNumParameters(int newValue) { m_numParameters = newValue; }
+    void addParameter() { m_numParameters++; }
+    unsigned numParameters() const { return m_numParameters; }
+
+    unsigned addRegExp(RegExp* r)
+    {
+        createRareDataIfNecessary();
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
+        unsigned size = m_rareData->m_regexps.size();
+        m_rareData->m_regexps.append(WriteBarrier<RegExp>(vm, this, r));
+        return size;
+    }
+    unsigned numberOfRegExps() const
+    {
+        if (!m_rareData)
+            return 0;
+        return m_rareData->m_regexps.size();
+    }
+    RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
+
+    // Constant Pools
+
+    size_t numberOfIdentifiers() const { return m_identifiers.size(); }
+    void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
+    const Identifier& identifier(int index) const { return m_identifiers[index]; }
+    const Vector<Identifier>& identifiers() const { return m_identifiers; }
+
+    const Vector<BitVector>& bitVectors() const { return m_bitVectors; }
+    BitVector& bitVector(size_t i) { return m_bitVectors[i]; }
+    unsigned addBitVector(BitVector&& bitVector)
+    {
+        m_bitVectors.append(WTFMove(bitVector));
+        return m_bitVectors.size() - 1;
+    }
+
+    unsigned addConstant(JSValue v, SourceCodeRepresentation sourceCodeRepresentation = SourceCodeRepresentation::Other)
+    {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
+        unsigned result = m_constantRegisters.size();
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantRegisters.last().set(vm, this, v);
+        m_constantsSourceCodeRepresentation.append(sourceCodeRepresentation);
+        return result;
+    }
+    unsigned addConstant(LinkTimeConstant type)
+    {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
+        unsigned result = m_constantRegisters.size();
+        ASSERT(result);
+        unsigned index = static_cast<unsigned>(type);
+        ASSERT(index < LinkTimeConstantCount);
+        m_linkTimeConstants[index] = result;
+        m_constantRegisters.append(WriteBarrier<Unknown>());
+        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
+        return result;
+    }
+    unsigned registerIndexForLinkTimeConstant(LinkTimeConstant type)
+    {
+        unsigned index = static_cast<unsigned>(type);
+        ASSERT(index < LinkTimeConstantCount);
+        return m_linkTimeConstants[index];
+    }
+    const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
+    const WriteBarrier<Unknown>& constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+    const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
+
+    // Jumps
+    size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
+    void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
+    unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
+    unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
+
+    UnlinkedHandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
+    UnlinkedHandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+
+    bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+
+    ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+    SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+    JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+
+    void shrinkToFit();
+
+    void setInstructions(std::unique_ptr<UnlinkedInstructionStream>);
+    const UnlinkedInstructionStream& instructions() const;
+
+    int numCalleeLocals() const { return m_numCalleeLocals; }
+
+    int m_numVars;
+    int m_numCapturedVars;
+    int m_numCalleeLocals;
+
+    // Jump Tables
+
+    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
+    UnlinkedSimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
+    UnlinkedSimpleJumpTable& switchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
+
+    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
+    UnlinkedStringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(UnlinkedStringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
+    UnlinkedStringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
+
+    unsigned addFunctionDecl(UnlinkedFunctionExecutable* n)
+    {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
+        unsigned size = m_functionDecls.size();
+        m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>());
+        m_functionDecls.last().set(vm, this, n);
+        return size;
+    }
+    UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
+    size_t numberOfFunctionDecls() { return m_functionDecls.size(); }
+    unsigned addFunctionExpr(UnlinkedFunctionExecutable* n)
+    {
+        VM& vm = *this->vm();
+        auto locker = lockDuringMarking(vm.heap, *this);
+        unsigned size = m_functionExprs.size();
+        m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>());
+        m_functionExprs.last().set(vm, this, n);
+        return size;
+    }
+    UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
+    size_t numberOfFunctionExprs() { return m_functionExprs.size(); }
+
+    // Exception handling support
+    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
+    void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
+    UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
+
+    UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; }
+    unsigned numberOfArrayProfiles() { return m_arrayProfileCount; }
+    UnlinkedArrayAllocationProfile addArrayAllocationProfile() { return m_arrayAllocationProfileCount++; }
+    unsigned numberOfArrayAllocationProfiles() { return m_arrayAllocationProfileCount; }
+    UnlinkedObjectAllocationProfile addObjectAllocationProfile() { return m_objectAllocationProfileCount++; }
+    unsigned numberOfObjectAllocationProfiles() { return m_objectAllocationProfileCount; }
+    UnlinkedValueProfile addValueProfile() { return m_valueProfileCount++; }
+    unsigned numberOfValueProfiles() { return m_valueProfileCount; }
+
+    UnlinkedLLIntCallLinkInfo addLLIntCallLinkInfo() { return m_llintCallLinkInfoCount++; }
+    unsigned numberOfLLintCallLinkInfos() { return m_llintCallLinkInfoCount; }
+
+    CodeType codeType() const { return m_codeType; }
+
+    VirtualRegister thisRegister() const { return m_thisRegister; }
+    VirtualRegister scopeRegister() const { return m_scopeRegister; }
+
+    void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
+    {
+        m_propertyAccessInstructions.append(propertyAccessInstruction);
+    }
+
+    size_t numberOfPropertyAccessInstructions() const { return m_propertyAccessInstructions.size(); }
+    const Vector<unsigned>& propertyAccessInstructions() const { return m_propertyAccessInstructions; }
+
+    typedef Vector<JSValue> ConstantBuffer;
+
+    size_t constantBufferCount() { ASSERT(m_rareData); return m_rareData->m_constantBuffers.size(); }
+    unsigned addConstantBuffer(unsigned length)
+    {
+        createRareDataIfNecessary();
+        unsigned size = m_rareData->m_constantBuffers.size();
+        m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
+        return size;
+    }
+
+    const ConstantBuffer& constantBuffer(unsigned index) const
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_constantBuffers[index];
+    }
+
+    ConstantBuffer& constantBuffer(unsigned index)
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_constantBuffers[index];
+    }
+
+    bool hasRareData() const { return m_rareData.get(); }
+
+    int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+
+    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;
+
+    bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot);
+
+    void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned lineCount, unsigned endColumn)
+    {
+        m_features = features;
+        m_hasCapturedVariables = hasCapturedVariables;
+        m_lineCount = lineCount;
+        // For the UnlinkedCodeBlock, startColumn is always 0.
+        m_endColumn = endColumn;
+    }
+
+    const String& sourceURLDirective() const { return m_sourceURLDirective; }
+    const String& sourceMappingURLDirective() const { return m_sourceMappingURLDirective; }
+    void setSourceURLDirective(const String& sourceURL) { m_sourceURLDirective = sourceURL; }
+    void setSourceMappingURLDirective(const String& sourceMappingURL) { m_sourceMappingURLDirective = sourceMappingURL; }
+
+    CodeFeatures codeFeatures() const { return m_features; }
+    bool hasCapturedVariables() const { return m_hasCapturedVariables; }
+    unsigned lineCount() const { return m_lineCount; }
+    ALWAYS_INLINE unsigned startColumn() const { return 0; }
+    unsigned endColumn() const { return m_endColumn; }
+
+    void addOpProfileControlFlowBytecodeOffset(size_t offset)
+    {
+        createRareDataIfNecessary();
+        m_rareData->m_opProfileControlFlowBytecodeOffsets.append(offset);
+    }
+    const Vector<size_t>& opProfileControlFlowBytecodeOffsets() const
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_opProfileControlFlowBytecodeOffsets;
+    }
+    bool hasOpProfileControlFlowBytecodeOffsets() const
+    {
+        return m_rareData && !m_rareData->m_opProfileControlFlowBytecodeOffsets.isEmpty();
+    }
+
+    void dumpExpressionRangeInfo(); // For debugging purposes only.
+
+    bool wasCompiledWithDebuggingOpcodes() const { return m_wasCompiledWithDebuggingOpcodes; }
+
+    TriState didOptimize() const { return m_didOptimize; }
+    void setDidOptimize(TriState didOptimize) { m_didOptimize = didOptimize; }
+
+protected:
+    UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&, DebuggerMode);
+    ~UnlinkedCodeBlock();
+
+    void finishCreation(VM& vm)
+    {
+        Base::finishCreation(vm);
+    }
+
+private:
+    friend class BytecodeRewriter;
+    void applyModification(BytecodeRewriter&);
+
+    void createRareDataIfNecessary()
+    {
+        if (!m_rareData) {
+            auto locker = lockDuringMarking(*heap(), *this);
+            m_rareData = std::make_unique<RareData>();
+        }
+    }
+
+    void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const;
+
+    int m_numParameters;
+
+    std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
+
+    VirtualRegister m_thisRegister;
+    VirtualRegister m_scopeRegister;
+    VirtualRegister m_globalObjectRegister;
+
+    String m_sourceURLDirective;
+    String m_sourceMappingURLDirective;
+
+    unsigned m_usesEval : 1;
+    unsigned m_isStrictMode : 1;
+    unsigned m_isConstructor : 1;
+    unsigned m_hasCapturedVariables : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_superBinding : 1;
+    unsigned m_scriptMode: 1;
+    unsigned m_isArrowFunctionContext : 1;
+    unsigned m_isClassContext : 1;
+    unsigned m_wasCompiledWithDebuggingOpcodes : 1;
+    unsigned m_constructorKind : 2;
+    unsigned m_derivedContextType : 2;
+    unsigned m_evalContextType : 2;
+    unsigned m_lineCount;
+    unsigned m_endColumn;
+
+    TriState m_didOptimize;
+    SourceParseMode m_parseMode;
+    CodeFeatures m_features;
+    CodeType m_codeType;
+
+    Vector<unsigned> m_jumpTargets;
+
+    Vector<unsigned> m_propertyAccessInstructions;
+
+    // Constant Pools
+    Vector<Identifier> m_identifiers;
+    Vector<BitVector> m_bitVectors;
+    Vector<WriteBarrier<Unknown>> m_constantRegisters;
+    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+    typedef Vector<WriteBarrier<UnlinkedFunctionExecutable>> FunctionExpressionVector;
+    FunctionExpressionVector m_functionDecls;
+    FunctionExpressionVector m_functionExprs;
+    std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants;
+
+    unsigned m_arrayProfileCount;
+    unsigned m_arrayAllocationProfileCount;
+    unsigned m_objectAllocationProfileCount;
+    unsigned m_valueProfileCount;
+    unsigned m_llintCallLinkInfoCount;
+
+public:
+    struct RareData {
+        WTF_MAKE_FAST_ALLOCATED;
+    public:
+        Vector<UnlinkedHandlerInfo> m_exceptionHandlers;
+
+        // Rare Constants
+        Vector<WriteBarrier<RegExp>> m_regexps;
+
+        // Buffers used for large array literals
+        Vector<ConstantBuffer> m_constantBuffers;
+
+        // Jump Tables
+        Vector<UnlinkedSimpleJumpTable> m_switchJumpTables;
+        Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables;
+
+        Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions;
+
+        struct TypeProfilerExpressionRange {
+            unsigned m_startDivot;
+            unsigned m_endDivot;
+        };
+        HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap;
+        Vector<size_t> m_opProfileControlFlowBytecodeOffsets;
+    };
+
+private:
+    std::unique_ptr m_rareData;
+    Vector<ExpressionRangeInfo> m_expressionInfo;
+
+protected:
+    static void visitChildren(JSCell*, SlotVisitor&);
+    static size_t estimatedSize(JSCell*);
+
+public:
+    DECLARE_INFO;
+};
+
+}
diff --git a/bytecode/UnlinkedEvalCodeBlock.cpp b/bytecode/UnlinkedEvalCodeBlock.cpp
new file mode 100644
index 0000000..07f9916
--- /dev/null
+++ b/bytecode/UnlinkedEvalCodeBlock.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedEvalCodeBlock.h"
+
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) };
+
+void UnlinkedEvalCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock();
+}
+
+}
diff --git a/bytecode/UnlinkedEvalCodeBlock.h b/bytecode/UnlinkedEvalCodeBlock.h
new file mode 100644
index 0000000..3130ea4
--- /dev/null
+++ b/bytecode/UnlinkedEvalCodeBlock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedEvalCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+    typedef UnlinkedGlobalCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+    const Identifier& variable(unsigned index) { return m_variables[index]; }
+    unsigned numVariables() { return m_variables.size(); }
+    void adoptVariables(Vector<Identifier>& variables)
+    {
+        ASSERT(m_variables.isEmpty());
+        m_variables.swap(variables);
+    }
+
+private:
+    UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, EvalCode, info, debuggerMode)
+    {
+    }
+
+    Vector<Identifier> m_variables;
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
diff --git a/bytecode/UnlinkedFunctionCodeBlock.cpp b/bytecode/UnlinkedFunctionCodeBlock.cpp
new file mode 100644
index 0000000..151d560
--- /dev/null
+++ b/bytecode/UnlinkedFunctionCodeBlock.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) };
+
+void UnlinkedFunctionCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock();
+}
+
+}
diff --git a/bytecode/UnlinkedFunctionCodeBlock.h b/bytecode/UnlinkedFunctionCodeBlock.h
new file mode 100644
index 0000000..b5482b6
--- /dev/null
+++ b/bytecode/UnlinkedFunctionCodeBlock.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedFunctionCodeBlock final : public UnlinkedCodeBlock {
+public:
+    typedef UnlinkedCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+private:
+    UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, codeType, info, debuggerMode)
+    {
+    }
+    
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
diff --git a/bytecode/UnlinkedFunctionExecutable.cpp b/bytecode/UnlinkedFunctionExecutable.cpp
new file mode 100644
index 0000000..066bc67
--- /dev/null
+++ b/bytecode/UnlinkedFunctionExecutable.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedFunctionExecutable.h"
+
+#include "BytecodeGenerator.h"
+#include "ClassInfo.h"
+#include "CodeCache.h"
+#include "Debugger.h"
+#include "ExecutableInfo.h"
+#include "FunctionOverrides.h"
+#include "JSCInlines.h"
+#include "Parser.h"
+#include "SourceProvider.h"
+#include "Structure.h"
+#include "UnlinkedFunctionCodeBlock.h"
+
+namespace JSC {
+
+static_assert(sizeof(UnlinkedFunctionExecutable) <= 256, "UnlinkedFunctionExecutable should fit in a 256-byte cell.");
+
+const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) };
+
+static UnlinkedFunctionCodeBlock* generateUnlinkedFunctionCodeBlock(
+    VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source,
+    CodeSpecializationKind kind, DebuggerMode debuggerMode,
+    UnlinkedFunctionKind functionKind, ParserError& error, SourceParseMode parseMode)
+{
+    JSParserBuiltinMode builtinMode = executable->isBuiltinFunction() ? JSParserBuiltinMode::Builtin : JSParserBuiltinMode::NotBuiltin;
+    JSParserStrictMode strictMode = executable->isInStrictContext() ? JSParserStrictMode::Strict : JSParserStrictMode::NotStrict;
+    JSParserScriptMode scriptMode = executable->scriptMode();
+    ASSERT(isFunctionParseMode(executable->parseMode()));
+    std::unique_ptr<FunctionNode> function = parse<FunctionNode>(
+        &vm, source, executable->name(), builtinMode, strictMode, scriptMode, executable->parseMode(), executable->superBinding(), error, nullptr);
+
+    if (!function) {
+        ASSERT(error.isValid());
+        return nullptr;
+    }
+
+    function->finishParsing(executable->name(), executable->functionMode());
+    executable->recordParse(function->features(), function->hasCapturedVariables());
+
+    bool isClassContext = executable->superBinding() == SuperBinding::Needed;
+
+    UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, ExecutableInfo(function->usesEval(), function->isStrictMode(), kind == CodeForConstruct, functionKind == UnlinkedBuiltinFunction, executable->constructorKind(), scriptMode, executable->superBinding(), parseMode, executable->derivedContextType(), false, isClassContext, EvalContextType::FunctionEvalContext), debuggerMode);
+
+    error = BytecodeGenerator::generate(vm, function.get(), result, debuggerMode, executable->parentScopeTDZVariables());
+
+    if (error.isValid())
+        return nullptr;
+    return result;
+}
+
+UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& parentSource, SourceCode&& parentSourceOverride, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType)
+    : Base(*vm, structure)
+    , m_firstLineOffset(node->firstLine() - parentSource.firstLine().oneBasedInt())
+    , m_lineCount(node->lastLine() - node->firstLine())
+    , m_unlinkedFunctionNameStart(node->functionNameStart() - parentSource.startOffset())
+    , m_unlinkedBodyStartColumn(node->startColumn())
+    , m_unlinkedBodyEndColumn(m_lineCount ? node->endColumn() : node->endColumn() - node->startColumn())
+    , m_startOffset(node->source().startOffset() - parentSource.startOffset())
+    , m_sourceLength(node->source().length())
+    , m_parametersStartOffset(node->parametersStart())
+    , m_typeProfilingStartOffset(node->functionKeywordStart())
+    , m_typeProfilingEndOffset(node->startStartOffset() + node->source().length() - 1)
+    , m_parameterCount(node->parameterCount())
+    , m_functionLength(node->functionLength())
+    , m_features(0)
+    , m_sourceParseMode(node->parseMode())
+    , m_isInStrictContext(node->isInStrictContext())
+    , m_hasCapturedVariables(false)
+    , m_isBuiltinFunction(kind == UnlinkedBuiltinFunction)
+    , m_constructAbility(static_cast<unsigned>(constructAbility))
+    , m_constructorKind(static_cast<unsigned>(node->constructorKind()))
+    , m_functionMode(static_cast<unsigned>(node->functionMode()))
+    , m_scriptMode(static_cast<unsigned>(scriptMode))
+    , m_superBinding(static_cast<unsigned>(node->superBinding()))
+    , m_derivedContextType(static_cast<unsigned>(derivedContextType))
+    , m_name(node->ident())
+    , m_ecmaName(node->ecmaName())
+    , m_inferredName(node->inferredName())
+    , m_parentSourceOverride(WTFMove(parentSourceOverride))
+    , m_classSource(node->classSource())
+{
+    // Make sure these bitfields are adequately wide.
+    ASSERT(m_constructAbility == static_cast<unsigned>(constructAbility));
+    ASSERT(m_constructorKind == static_cast<unsigned>(node->constructorKind()));
+    ASSERT(m_functionMode == static_cast<unsigned>(node->functionMode()));
+    ASSERT(m_scriptMode == static_cast<unsigned>(scriptMode));
+    ASSERT(m_superBinding == static_cast<unsigned>(node->superBinding()));
+    ASSERT(m_derivedContextType == static_cast<unsigned>(derivedContextType));
+
+    m_parentScopeTDZVariables.swap(parentScopeTDZVariables);
+}
+
+void UnlinkedFunctionExecutable::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable();
+}
+
+void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+    visitor.append(thisObject->m_unlinkedCodeBlockForCall);
+    visitor.append(thisObject->m_unlinkedCodeBlockForConstruct);
+}
+
+FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& passedParentSource, std::optional<int> overrideLineNumber, Intrinsic intrinsic)
+{
+    const SourceCode& parentSource = m_parentSourceOverride.isNull() ? passedParentSource : m_parentSourceOverride;
+    unsigned firstLine = parentSource.firstLine().oneBasedInt() + m_firstLineOffset;
+    unsigned startOffset = parentSource.startOffset() + m_startOffset;
+    unsigned lineCount = m_lineCount;
+
+    unsigned startColumn = linkedStartColumn(parentSource.startColumn().oneBasedInt());
+    unsigned endColumn = linkedEndColumn(startColumn);
+
+    SourceCode source(parentSource.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn);
+    FunctionOverrides::OverrideInfo overrideInfo;
+    bool hasFunctionOverride = false;
+
+    if (UNLIKELY(Options::functionOverrides())) {
+        hasFunctionOverride = FunctionOverrides::initializeOverrideFor(source, overrideInfo);
+        if (UNLIKELY(hasFunctionOverride)) {
+            firstLine = overrideInfo.firstLine;
+            lineCount = overrideInfo.lineCount;
+            startColumn = overrideInfo.startColumn;
+            endColumn = overrideInfo.endColumn;
+            source = overrideInfo.sourceCode;
+        }
+    }
+
+    FunctionExecutable* result = FunctionExecutable::create(vm, source, this, firstLine + lineCount, endColumn, intrinsic);
+    if (overrideLineNumber)
+        result->setOverrideLineNumber(*overrideLineNumber);
+
+    if (UNLIKELY(hasFunctionOverride)) {
+        result->overrideParameterAndTypeProfilingStartEndOffsets(
+            overrideInfo.parametersStartOffset,
+            overrideInfo.typeProfilingStartOffset,
+            overrideInfo.typeProfilingEndOffset);
+    }
+
+    return result;
+}
+
+UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(
+    const Identifier& name, ExecState& exec, const SourceCode& source, 
+    JSObject*& exception, int overrideLineNumber)
+{
+    ParserError error;
+    VM& vm = exec.vm();
+    auto& globalObject = *exec.lexicalGlobalObject();
+    CodeCache* codeCache = vm.codeCache();
+    DebuggerMode debuggerMode = globalObject.hasInteractiveDebugger() ? DebuggerOn : DebuggerOff;
+    UnlinkedFunctionExecutable* executable = codeCache->getUnlinkedGlobalFunctionExecutable(vm, name, source, debuggerMode, error);
+
+    if (globalObject.hasDebugger())
+        globalObject.debugger()->sourceParsed(&exec, source.provider(), error.line(), error.message());
+
+    if (error.isValid()) {
+        exception = error.toErrorObject(&globalObject, source, overrideLineNumber);
+        return nullptr;
+    }
+
+    return executable;
+}
+
+UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::unlinkedCodeBlockFor(
+    VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, 
+    DebuggerMode debuggerMode, ParserError& error, SourceParseMode parseMode)
+{
+    switch (specializationKind) {
+    case CodeForCall:
+        if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForCall.get())
+            return codeBlock;
+        break;
+    case CodeForConstruct:
+        if (UnlinkedFunctionCodeBlock* codeBlock = m_unlinkedCodeBlockForConstruct.get())
+            return codeBlock;
+        break;
+    }
+
+    UnlinkedFunctionCodeBlock* result = generateUnlinkedFunctionCodeBlock(
+        vm, this, source, specializationKind, debuggerMode, 
+        isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction, 
+        error, parseMode);
+    
+    if (error.isValid())
+        return nullptr;
+
+    switch (specializationKind) {
+    case CodeForCall:
+        m_unlinkedCodeBlockForCall.set(vm, this, result);
+        break;
+    case CodeForConstruct:
+        m_unlinkedCodeBlockForConstruct.set(vm, this, result);
+        break;
+    }
+    return result;
+}
+
+void UnlinkedFunctionExecutable::setInvalidTypeProfilingOffsets()
+{
+    m_typeProfilingStartOffset = std::numeric_limits<unsigned>::max();
+    m_typeProfilingEndOffset = std::numeric_limits<unsigned>::max();
+}
+
+} // namespace JSC
diff --git a/bytecode/UnlinkedFunctionExecutable.h b/bytecode/UnlinkedFunctionExecutable.h
new file mode 100644
index 0000000..0879dfb
--- /dev/null
+++ b/bytecode/UnlinkedFunctionExecutable.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "BytecodeConventions.h"
+#include "CodeSpecializationKind.h"
+#include "CodeType.h"
+#include "ConstructAbility.h"
+#include "ExecutableInfo.h"
+#include "ExpressionRangeInfo.h"
+#include "HandlerInfo.h"
+#include "Identifier.h"
+#include "Intrinsic.h"
+#include "JSCell.h"
+#include "JSString.h"
+#include "ParserModes.h"
+#include "RegExp.h"
+#include "SpecialPointer.h"
+#include "VariableEnvironment.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+class FunctionMetadataNode;
+class FunctionExecutable;
+class ParserError;
+class SourceCode;
+class SourceProvider;
+class UnlinkedFunctionCodeBlock;
+
+enum UnlinkedFunctionKind {
+    UnlinkedNormalFunction,
+    UnlinkedBuiltinFunction,
+};
+
+class UnlinkedFunctionExecutable final : public JSCell {
+public:
+    friend class CodeCache;
+    friend class VM;
+
+    typedef JSCell Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, JSParserScriptMode scriptMode, VariableEnvironment& parentScopeTDZVariables, DerivedContextType derivedContextType, SourceCode&& parentSourceOverride = SourceCode())
+    {
+        UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap))
+            UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, WTFMove(parentSourceOverride), node, unlinkedFunctionKind, constructAbility, scriptMode, parentScopeTDZVariables, derivedContextType);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    const Identifier& name() const { return m_name; }
+    const Identifier& ecmaName() const { return m_ecmaName; }
+    void setEcmaName(const Identifier& name) { m_ecmaName = name; }
+    const Identifier& inferredName() const { return m_inferredName; }
+    unsigned parameterCount() const { return m_parameterCount; }; // Excluding 'this'!
+    unsigned functionLength() const { return m_functionLength; }
+    SourceParseMode parseMode() const { return static_cast<SourceParseMode>(m_sourceParseMode); };
+
+    const SourceCode& classSource() const { return m_classSource; };
+    void setClassSource(const SourceCode& source) { m_classSource = source; };
+
+    bool isInStrictContext() const { return m_isInStrictContext; }
+    FunctionMode functionMode() const { return static_cast<FunctionMode>(m_functionMode); }
+    ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); }
+    SuperBinding superBinding() const { return static_cast<SuperBinding>(m_superBinding); }
+
+    unsigned lineCount() const { return m_lineCount; }
+    unsigned linkedStartColumn(unsigned parentStartColumn) const { return m_unlinkedBodyStartColumn + (!m_firstLineOffset ? parentStartColumn : 1); }
+    unsigned linkedEndColumn(unsigned startColumn) const { return m_unlinkedBodyEndColumn + (!m_lineCount ? startColumn : 1); }
+
+    unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; }
+    unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; }
+    unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; }
+    unsigned startOffset() const { return m_startOffset; }
+    unsigned sourceLength() { return m_sourceLength; }
+    unsigned parametersStartOffset() const { return m_parametersStartOffset; }
+    unsigned typeProfilingStartOffset() const { return m_typeProfilingStartOffset; }
+    unsigned typeProfilingEndOffset() const { return m_typeProfilingEndOffset; }
+    void setInvalidTypeProfilingOffsets();
+
+    UnlinkedFunctionCodeBlock* unlinkedCodeBlockFor(
+        VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode,
+        ParserError&, SourceParseMode);
+
+    static UnlinkedFunctionExecutable* fromGlobalCode(
+        const Identifier&, ExecState&, const SourceCode&, JSObject*& exception, 
+        int overrideLineNumber);
+
+    JS_EXPORT_PRIVATE FunctionExecutable* link(VM&, const SourceCode& parentSource, std::optional<int> overrideLineNumber = std::nullopt, Intrinsic = NoIntrinsic);
+
+    void clearCode()
+    {
+        m_unlinkedCodeBlockForCall.clear();
+        m_unlinkedCodeBlockForConstruct.clear();
+    }
+
+    void recordParse(CodeFeatures features, bool hasCapturedVariables)
+    {
+        m_features = features;
+        m_hasCapturedVariables = hasCapturedVariables;
+    }
+
+    CodeFeatures features() const { return m_features; }
+    bool hasCapturedVariables() const { return m_hasCapturedVariables; }
+
+    static const bool needsDestruction = true;
+    static void destroy(JSCell*);
+
+    bool isBuiltinFunction() const { return m_isBuiltinFunction; }
+    ConstructAbility constructAbility() const { return static_cast<ConstructAbility>(m_constructAbility); }
+    JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
+    bool isClassConstructorFunction() const { return constructorKind() != ConstructorKind::None; }
+    const VariableEnvironment* parentScopeTDZVariables() const { return &m_parentScopeTDZVariables; }
+    
+    bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
+
+    JSC::DerivedContextType derivedContextType() const { return static_cast<JSC::DerivedContextType>(m_derivedContextType); }
+
+    const String& sourceURLDirective() const { return m_sourceURLDirective; }
+    const String& sourceMappingURLDirective() const { return m_sourceMappingURLDirective; }
+    void setSourceURLDirective(const String& sourceURL) { m_sourceURLDirective = sourceURL; }
+    void setSourceMappingURLDirective(const String& sourceMappingURL) { m_sourceMappingURLDirective = sourceMappingURL; }
+
+private:
+    UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, SourceCode&& parentSourceOverride, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, JSParserScriptMode, VariableEnvironment&,  JSC::DerivedContextType);
+
+    unsigned m_firstLineOffset;
+    unsigned m_lineCount;
+    unsigned m_unlinkedFunctionNameStart;
+    unsigned m_unlinkedBodyStartColumn;
+    unsigned m_unlinkedBodyEndColumn;
+    unsigned m_startOffset;
+    unsigned m_sourceLength;
+    unsigned m_parametersStartOffset;
+    unsigned m_typeProfilingStartOffset;
+    unsigned m_typeProfilingEndOffset;
+    unsigned m_parameterCount;
+    unsigned m_functionLength;
+    CodeFeatures m_features;
+    SourceParseMode m_sourceParseMode;
+    unsigned m_isInStrictContext : 1;
+    unsigned m_hasCapturedVariables : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_constructAbility: 1;
+    unsigned m_constructorKind : 2;
+    unsigned m_functionMode : 2; // FunctionMode
+    unsigned m_scriptMode: 1; // JSParserScriptMode
+    unsigned m_superBinding : 1;
+    unsigned m_derivedContextType: 2;
+
+    WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForCall;
+    WriteBarrier<UnlinkedFunctionCodeBlock> m_unlinkedCodeBlockForConstruct;
+
+    Identifier m_name;
+    Identifier m_ecmaName;
+    Identifier m_inferredName;
+    SourceCode m_parentSourceOverride;
+    SourceCode m_classSource;
+
+    String m_sourceURLDirective;
+    String m_sourceMappingURLDirective;
+
+    VariableEnvironment m_parentScopeTDZVariables;
+
+protected:
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info());
+    }
+
+    DECLARE_EXPORT_INFO;
+};
+
+} // namespace JSC
diff --git a/bytecode/UnlinkedGlobalCodeBlock.h b/bytecode/UnlinkedGlobalCodeBlock.h
new file mode 100644
index 0000000..343862e
--- /dev/null
+++ b/bytecode/UnlinkedGlobalCodeBlock.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock {
+public:
+    typedef UnlinkedCodeBlock Base;
+
+protected:
+    UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, codeType, info, debuggerMode)
+    {
+    }
+};
+
+}
diff --git a/bytecode/UnlinkedInstructionStream.cpp b/bytecode/UnlinkedInstructionStream.cpp
new file mode 100644
index 0000000..e8762ff
--- /dev/null
+++ b/bytecode/UnlinkedInstructionStream.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedInstructionStream.h"
+
+#include "Opcode.h"
+
+namespace JSC {
+
+static void append8(unsigned char*& ptr, unsigned char value)
+{
+    *(ptr++) = value;
+}
+
+static void append32(unsigned char*& ptr, unsigned value)
+{
+    if (!(value & 0xffffffe0)) {
+        *(ptr++) = value;
+        return;
+    }
+
+    if ((value & 0xffffffe0) == 0xffffffe0) {
+        *(ptr++) = (Negative5Bit << 5) | (value & 0x1f);
+        return;
+    }
+
+    if ((value & 0xffffffe0) == 0x40000000) {
+        *(ptr++) = (ConstantRegister5Bit << 5) | (value & 0x1f);
+        return;
+    }
+
+    if (!(value & 0xffffe000)) {
+        *(ptr++) = (Positive13Bit << 5) | ((value >> 8) & 0x1f);
+        *(ptr++) = value & 0xff;
+        return;
+    }
+
+    if ((value & 0xffffe000) == 0xffffe000) {
+        *(ptr++) = (Negative13Bit << 5) | ((value >> 8) & 0x1f);
+        *(ptr++) = value & 0xff;
+        return;
+    }
+
+    if ((value & 0xffffe000) == 0x40000000) {
+        *(ptr++) = (ConstantRegister13Bit << 5) | ((value >> 8) & 0x1f);
+        *(ptr++) = value & 0xff;
+        return;
+    }
+
+    *(ptr++) = Full32Bit << 5;
+    *(ptr++) = value & 0xff;
+    *(ptr++) = (value >> 8) & 0xff;
+    *(ptr++) = (value >> 16) & 0xff;
+    *(ptr++) = (value >> 24) & 0xff;
+}
+
+UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction>& instructions)
+    : m_instructionCount(instructions.size())
+{
+    Vector<unsigned char> buffer;
+
+    // Reserve enough space up front so we never have to reallocate when appending.
+    buffer.resizeToFit(m_instructionCount * 5);
+    unsigned char* ptr = buffer.data();
+
+    const UnlinkedInstruction* instructionsData = instructions.data();
+    for (unsigned i = 0; i < m_instructionCount;) {
+        const UnlinkedInstruction* pc = &instructionsData[i];
+        OpcodeID opcode = pc[0].u.opcode;
+        append8(ptr, opcode);
+
+        unsigned opLength = opcodeLength(opcode);
+
+        for (unsigned j = 1; j < opLength; ++j)
+            append32(ptr, pc[j].u.index);
+
+        i += opLength;
+    }
+
+    buffer.shrink(ptr - buffer.data());
+    m_data = RefCountedArray<unsigned char>(buffer);
+}
+
+size_t UnlinkedInstructionStream::sizeInBytes() const
+{
+    return m_data.size() * sizeof(unsigned char);
+}
+
+#ifndef NDEBUG
+const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const
+{
+    if (!m_unpackedInstructionsForDebugging.size()) {
+        m_unpackedInstructionsForDebugging = RefCountedArray<UnlinkedInstruction>(m_instructionCount);
+
+        Reader instructionReader(*this);
+        for (unsigned i = 0; !instructionReader.atEnd(); ) {
+            const UnlinkedInstruction* pc = instructionReader.next();
+            unsigned opLength = opcodeLength(pc[0].u.opcode);
+            for (unsigned j = 0; j < opLength; ++j)
+                m_unpackedInstructionsForDebugging[i++] = pc[j];
+        }
+    }
+
+    return m_unpackedInstructionsForDebugging;
+}
+#endif
+
+}
+
diff --git a/bytecode/UnlinkedInstructionStream.h b/bytecode/UnlinkedInstructionStream.h
new file mode 100644
index 0000000..ef139ad
--- /dev/null
+++ b/bytecode/UnlinkedInstructionStream.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#pragma once
+
+#include "Opcode.h"
+#include "UnlinkedCodeBlock.h"
+#include <wtf/RefCountedArray.h>
+
+namespace JSC {
+
+class UnlinkedInstructionStream {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction>&);
+
+    unsigned count() const { return m_instructionCount; }
+    size_t sizeInBytes() const;
+
+    class Reader {
+    public:
+        explicit Reader(const UnlinkedInstructionStream&);
+
+        const UnlinkedInstruction* next();
+        bool atEnd() const { return m_index == m_stream.m_data.size(); }
+
+    private:
+        unsigned char read8();
+        unsigned read32();
+
+        const UnlinkedInstructionStream& m_stream;
+        UnlinkedInstruction m_unpackedBuffer[16];
+        unsigned m_index;
+    };
+
+#ifndef NDEBUG
+    const RefCountedArray<UnlinkedInstruction>& unpackForDebugging() const;
+#endif
+
+private:
+    friend class Reader;
+
+#ifndef NDEBUG
+    mutable RefCountedArray<UnlinkedInstruction> m_unpackedInstructionsForDebugging;
+#endif
+
+    RefCountedArray<unsigned char> m_data;
+    unsigned m_instructionCount;
+};
+
+// Unlinked instructions are packed in a simple stream format.
+//
+// The first byte is always the opcode.
+// It's followed by an opcode-dependent number of argument values.
+// The first 3 bits of each value determine the format:
+//
+//     5-bit positive integer (1 byte total)
+//     5-bit negative integer (1 byte total)
+//     13-bit positive integer (2 bytes total)
+//     13-bit negative integer (2 bytes total)
+//     5-bit constant register index, based at 0x40000000 (1 byte total)
+//     13-bit constant register index, based at 0x40000000 (2 bytes total)
+//     32-bit raw value (5 bytes total)
+
+enum PackedValueType {
+    Positive5Bit = 0,
+    Negative5Bit,
+    Positive13Bit,
+    Negative13Bit,
+    ConstantRegister5Bit,
+    ConstantRegister13Bit,
+    Full32Bit
+};
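+
+// Worked example of the packing scheme above (illustrative note, not part of the
+// original sources; derived from append32()/read32()): the value 7 fits in 5 bits,
+// so it packs to the single byte 0x07 (the Positive5Bit tag is 0). The value 0x1234
+// needs the 13-bit form: (Positive13Bit << 5) | 0x12 gives the byte 0x52, followed
+// by the low byte 0x34; read32() then reconstructs ((0x52 & 0x1F) << 8) | 0x34 == 0x1234.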
+
+ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
+    : m_stream(stream)
+    , m_index(0)
+{
+}
+
+ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8()
+{
+    return m_stream.m_data.data()[m_index++];
+}
+
+ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32()
+{
+    const unsigned char* data = &m_stream.m_data.data()[m_index];
+    unsigned char type = data[0] >> 5;
+
+    switch (type) {
+    case Positive5Bit:
+        m_index++;
+        return data[0];
+    case Negative5Bit:
+        m_index++;
+        return 0xffffffe0 | data[0];
+    case Positive13Bit:
+        m_index += 2;
+        return ((data[0] & 0x1F) << 8) | data[1];
+    case Negative13Bit:
+        m_index += 2;
+        return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
+    case ConstantRegister5Bit:
+        m_index++;
+        return 0x40000000 | (data[0] & 0x1F);
+    case ConstantRegister13Bit:
+        m_index += 2;
+        return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
+    default:
+        ASSERT(type == Full32Bit);
+        m_index += 5;
+        return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
+    }
+}
+
+ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
+{
+    m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
+    unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
+    for (unsigned i = 1; i < opLength; ++i)
+        m_unpackedBuffer[i].u.index = read32();
+    return m_unpackedBuffer;
+}
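+
+// Illustrative usage sketch (not from the original sources): walking a packed
+// stream with Reader, assuming `stream` is an UnlinkedInstructionStream built
+// from a Vector<UnlinkedInstruction>:
+//
+//     UnlinkedInstructionStream::Reader reader(stream);
+//     while (!reader.atEnd()) {
+//         const UnlinkedInstruction* pc = reader.next();
+//         unsigned opLength = opcodeLength(pc[0].u.opcode);
+//         // pc[1] .. pc[opLength - 1] hold the decoded operands.
+//     }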
+
+} // namespace JSC
diff --git a/bytecode/UnlinkedModuleProgramCodeBlock.cpp b/bytecode/UnlinkedModuleProgramCodeBlock.cpp
new file mode 100644
index 0000000..00f36c0
--- /dev/null
+++ b/bytecode/UnlinkedModuleProgramCodeBlock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+
+#include "HeapInlines.h"
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedModuleProgramCodeBlock::s_info = { "UnlinkedModuleProgramCodeBlock", &Base::s_info, nullptr, CREATE_METHOD_TABLE(UnlinkedModuleProgramCodeBlock) };
+
+void UnlinkedModuleProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedModuleProgramCodeBlock* thisObject = jsCast<UnlinkedModuleProgramCodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+}
+
+void UnlinkedModuleProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedModuleProgramCodeBlock*>(cell)->~UnlinkedModuleProgramCodeBlock();
+}
+
+}
diff --git a/bytecode/UnlinkedModuleProgramCodeBlock.h b/bytecode/UnlinkedModuleProgramCodeBlock.h
new file mode 100644
index 0000000..8676a24
--- /dev/null
+++ b/bytecode/UnlinkedModuleProgramCodeBlock.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedModuleProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+    typedef UnlinkedGlobalCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedModuleProgramCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedModuleProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedModuleProgramCodeBlock>(vm->heap)) UnlinkedModuleProgramCodeBlock(vm, vm->unlinkedModuleProgramCodeBlockStructure.get(), info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+    // This offset represents the constant register offset to the stored symbol table that represents the layout of the
+    // module environment. This symbol table is created by the byte code generator since the module environment includes
+    // the top-most lexical captured variables inside the module code. This means that, once the module environment is
+    // allocated and instantiated from this symbol table, it is tightly coupled with the specific unlinked module program
+    // code block and the stored symbol table. So before executing the module code, we should not clear the unlinked module
+    // program code block in the module executable. This requirement is met because the garbage collector only clears
+    // unlinked code in (1) unmarked executables and (2) function executables.
+    //
+    // Since the function code may be executed repeatedly and the environment of each function execution is different,
+    // the function code needs to allocate and instantiate the environment in its prologue. On the
+    // other hand, the module code is executed only once. So we can instantiate the module environment outside the module
+    // code. At that time, we construct the module environment by using the symbol table that is held by the module executable.
+    // The symbol table held by the executable is the cloned one from one in the unlinked code block. Instantiating the module
+    // environment before executing and linking the module code is required to link the imported bindings between the modules.
+    //
+    // The unlinked module program code block only holds the pre-cloned symbol table in its constant register pool. It does
+    // not hold the instantiated module environment. So while the module environment requires the specific unlinked module
+    // program code block, the unlinked module code block can be used for the module environment instantiated from this
+    // unlinked code block. There is 1:N relation between the unlinked module code block and the module environments. So the
+    // unlinked module program code block can be cached.
+    //
+    // On the other hand, the linked code block for the module environment includes the resolved references to the imported
+    // bindings. An imported binding references another module's environment, so the linked code block is tightly coupled
+    // with the specific set of the module environments. Thus, the linked code block should not be cached.
+    int moduleEnvironmentSymbolTableConstantRegisterOffset() { return m_moduleEnvironmentSymbolTableConstantRegisterOffset; }
+    void setModuleEnvironmentSymbolTableConstantRegisterOffset(int offset)
+    {
+        m_moduleEnvironmentSymbolTableConstantRegisterOffset = offset;
+    }
+
+private:
+    UnlinkedModuleProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, ModuleCode, info, debuggerMode)
+    {
+    }
+
+    int m_moduleEnvironmentSymbolTableConstantRegisterOffset { 0 };
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedModuleProgramCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
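
The comment above explains that the unlinked module program code block records only the constant-register offset of the pre-cloned symbol table, and that one unlinked code block can serve many module environments. As a hedged sketch (the two helper functions below are hypothetical; the accessors are the ones declared in this header), the offset is typically round-tripped like this:

    #include "UnlinkedModuleProgramCodeBlock.h"

    namespace JSC {

    // The byte code generator would record the offset once it has placed the
    // pre-cloned module symbol table into the constant register pool.
    static void recordSymbolTableConstant(UnlinkedModuleProgramCodeBlock* codeBlock, int constantRegisterOffset)
    {
        codeBlock->setModuleEnvironmentSymbolTableConstantRegisterOffset(constantRegisterOffset);
    }

    // The module executable reads the offset back when it instantiates the module
    // environment, before the module code is linked and executed.
    static int lookUpSymbolTableConstant(UnlinkedModuleProgramCodeBlock* codeBlock)
    {
        return codeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset();
    }

    } // namespace JSC
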
diff --git a/bytecode/UnlinkedProgramCodeBlock.cpp b/bytecode/UnlinkedProgramCodeBlock.cpp
new file mode 100644
index 0000000..95df299
--- /dev/null
+++ b/bytecode/UnlinkedProgramCodeBlock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012-2013, 2015-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "UnlinkedProgramCodeBlock.h"
+
+#include "HeapInlines.h"
+#include "JSCellInlines.h"
+
+namespace JSC {
+
+const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, nullptr, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) };
+
+void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
+{
+    UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell);
+    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
+    Base::visitChildren(thisObject, visitor);
+}
+
+void UnlinkedProgramCodeBlock::destroy(JSCell* cell)
+{
+    static_cast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock();
+}
+
+}
diff --git a/bytecode/UnlinkedProgramCodeBlock.h b/bytecode/UnlinkedProgramCodeBlock.h
new file mode 100644
index 0000000..290eae4
--- /dev/null
+++ b/bytecode/UnlinkedProgramCodeBlock.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "UnlinkedGlobalCodeBlock.h"
+
+namespace JSC {
+
+class UnlinkedProgramCodeBlock final : public UnlinkedGlobalCodeBlock {
+public:
+    typedef UnlinkedGlobalCodeBlock Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
+
+    static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info, DebuggerMode debuggerMode)
+    {
+        UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info, debuggerMode);
+        instance->finishCreation(*vm);
+        return instance;
+    }
+
+    static void destroy(JSCell*);
+
+    void setVariableDeclarations(const VariableEnvironment& environment) { m_varDeclarations = environment; }
+    const VariableEnvironment& variableDeclarations() const { return m_varDeclarations; }
+
+    void setLexicalDeclarations(const VariableEnvironment& environment) { m_lexicalDeclarations = environment; }
+    const VariableEnvironment& lexicalDeclarations() const { return m_lexicalDeclarations; }
+
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+private:
+    UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info, DebuggerMode debuggerMode)
+        : Base(vm, structure, GlobalCode, info, debuggerMode)
+    {
+    }
+
+    VariableEnvironment m_varDeclarations;
+    VariableEnvironment m_lexicalDeclarations;
+
+public:
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto)
+    {
+        return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info());
+    }
+
+    DECLARE_INFO;
+};
+
+}
diff --git a/bytecode/ValueProfile.h b/bytecode/ValueProfile.h
new file mode 100644
index 0000000..8724eb4
--- /dev/null
+++ b/bytecode/ValueProfile.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2011-2013, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ConcurrentJSLock.h"
+#include "Heap.h"
+#include "JSArray.h"
+#include "SpeculatedType.h"
+#include "Structure.h"
+#include "TagRegistersMode.h"
+#include "WriteBarrier.h"
+#include <wtf/PrintStream.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC {
+
+template<unsigned numberOfBucketsArgument>
+struct ValueProfileBase {
+    static const unsigned numberOfBuckets = numberOfBucketsArgument;
+    static const unsigned numberOfSpecFailBuckets = 1;
+    static const unsigned bucketIndexMask = numberOfBuckets - 1;
+    static const unsigned totalNumberOfBuckets = numberOfBuckets + numberOfSpecFailBuckets;
+    
+    ValueProfileBase()
+        : m_bytecodeOffset(-1)
+        , m_prediction(SpecNone)
+        , m_numberOfSamplesInPrediction(0)
+    {
+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i)
+            m_buckets[i] = JSValue::encode(JSValue());
+    }
+    
+    ValueProfileBase(int bytecodeOffset)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_prediction(SpecNone)
+        , m_numberOfSamplesInPrediction(0)
+    {
+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i)
+            m_buckets[i] = JSValue::encode(JSValue());
+    }
+    
+    EncodedJSValue* specFailBucket(unsigned i)
+    {
+        ASSERT(numberOfBuckets + i < totalNumberOfBuckets);
+        return m_buckets + numberOfBuckets + i;
+    }
+    
+    const ClassInfo* classInfo(unsigned bucket) const
+    {
+        JSValue value = JSValue::decode(m_buckets[bucket]);
+        if (!!value) {
+            if (!value.isCell())
+                return 0;
+            return value.asCell()->structure()->classInfo();
+        }
+        return 0;
+    }
+    
+    unsigned numberOfSamples() const
+    {
+        unsigned result = 0;
+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+            if (!!JSValue::decode(m_buckets[i]))
+                result++;
+        }
+        return result;
+    }
+    
+    unsigned totalNumberOfSamples() const
+    {
+        return numberOfSamples() + m_numberOfSamplesInPrediction;
+    }
+    
+    bool isLive() const
+    {
+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+            if (!!JSValue::decode(m_buckets[i]))
+                return true;
+        }
+        return false;
+    }
+    
+    CString briefDescription(const ConcurrentJSLocker& locker)
+    {
+        computeUpdatedPrediction(locker);
+        
+        StringPrintStream out;
+        out.print("predicting ", SpeculationDump(m_prediction));
+        return out.toCString();
+    }
+    
+    void dump(PrintStream& out)
+    {
+        out.print("samples = ", totalNumberOfSamples(), " prediction = ", SpeculationDump(m_prediction));
+        bool first = true;
+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+            JSValue value = JSValue::decode(m_buckets[i]);
+            if (!!value) {
+                if (first) {
+                    out.printf(": ");
+                    first = false;
+                } else
+                    out.printf(", ");
+                out.print(value);
+            }
+        }
+    }
+    
+    // Updates the prediction and returns the new one. Never call this from any thread
+    // that isn't executing the code.
+    SpeculatedType computeUpdatedPrediction(const ConcurrentJSLocker&)
+    {
+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+            JSValue value = JSValue::decode(m_buckets[i]);
+            if (!value)
+                continue;
+            
+            m_numberOfSamplesInPrediction++;
+            mergeSpeculation(m_prediction, speculationFromValue(value));
+            
+            m_buckets[i] = JSValue::encode(JSValue());
+        }
+        
+        return m_prediction;
+    }
+    
+    int m_bytecodeOffset; // -1 for prologue
+    
+    SpeculatedType m_prediction;
+    unsigned m_numberOfSamplesInPrediction;
+    
+    EncodedJSValue m_buckets[totalNumberOfBuckets];
+};
+
+struct MinimalValueProfile : public ValueProfileBase<0> {
+    MinimalValueProfile(): ValueProfileBase<0>() { }
+    MinimalValueProfile(int bytecodeOffset): ValueProfileBase<0>(bytecodeOffset) { }
+};
+
+template<unsigned logNumberOfBucketsArgument>
+struct ValueProfileWithLogNumberOfBuckets : public ValueProfileBase<1 << logNumberOfBucketsArgument> {
+    static const unsigned logNumberOfBuckets = logNumberOfBucketsArgument;
+    
+    ValueProfileWithLogNumberOfBuckets()
+        : ValueProfileBase<1 << logNumberOfBucketsArgument>()
+    {
+    }
+    ValueProfileWithLogNumberOfBuckets(int bytecodeOffset)
+        : ValueProfileBase<1 << logNumberOfBucketsArgument>(bytecodeOffset)
+    {
+    }
+};
+
+struct ValueProfile : public ValueProfileWithLogNumberOfBuckets<0> {
+    ValueProfile(): ValueProfileWithLogNumberOfBuckets<0>() { }
+    ValueProfile(int bytecodeOffset): ValueProfileWithLogNumberOfBuckets<0>(bytecodeOffset) { }
+};
+
+template<typename T>
+inline int getValueProfileBytecodeOffset(T* valueProfile)
+{
+    return valueProfile->m_bytecodeOffset;
+}
+
+// This is a mini value profile to catch pathologies. It is a counter that gets
+// incremented when we take the slow path on any instruction.
+struct RareCaseProfile {
+    RareCaseProfile(int bytecodeOffset)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_counter(0)
+    {
+    }
+    
+    int m_bytecodeOffset;
+    uint32_t m_counter;
+};
+
+inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
+{
+    return rareCaseProfile->m_bytecodeOffset;
+}
+
+} // namespace JSC
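
ValueProfileBase above is essentially a tiny bucket array that is periodically drained into a running SpeculatedType prediction via computeUpdatedPrediction(). The following stand-alone sketch deliberately avoids JSC types (Speculation and MiniProfile are invented for illustration) and shows the same accumulate-then-merge pattern:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for SpeculatedType: a bitmask of observed value kinds.
    using Speculation = uint32_t;
    constexpr Speculation SpecNothing = 0;
    constexpr Speculation SpecInt32   = 1 << 0;
    constexpr Speculation SpecDouble  = 1 << 1;

    // Mirrors the shape of ValueProfileBase: a small bucket array that is
    // periodically drained into a running prediction.
    template<unsigned numberOfBuckets>
    struct MiniProfile {
        Speculation buckets[numberOfBuckets] = { };
        Speculation prediction = SpecNothing;

        void record(unsigned bucketIndex, Speculation observed)
        {
            buckets[bucketIndex % numberOfBuckets] = observed;
        }

        Speculation computeUpdatedPrediction()
        {
            for (Speculation& bucket : buckets) {
                prediction |= bucket;  // plays the role of mergeSpeculation()
                bucket = SpecNothing;  // drain the bucket, as the real code does
            }
            return prediction;
        }
    };

    int main()
    {
        MiniProfile<1> profile;
        profile.record(0, SpecInt32);
        profile.computeUpdatedPrediction();
        profile.record(0, SpecDouble);
        std::printf("prediction mask: %u\n", profile.computeUpdatedPrediction()); // 3 = Int32|Double
        return 0;
    }
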
diff --git a/bytecode/ValueRecovery.cpp b/bytecode/ValueRecovery.cpp
new file mode 100644
index 0000000..9c083b0
--- /dev/null
+++ b/bytecode/ValueRecovery.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "ValueRecovery.h"
+
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+JSValue ValueRecovery::recover(ExecState* exec) const
+{
+    switch (technique()) {
+    case DisplacedInJSStack:
+        return exec->r(virtualRegister().offset()).jsValue();
+    case Int32DisplacedInJSStack:
+        return jsNumber(exec->r(virtualRegister().offset()).unboxedInt32());
+    case Int52DisplacedInJSStack:
+        return jsNumber(exec->r(virtualRegister().offset()).unboxedInt52());
+    case StrictInt52DisplacedInJSStack:
+        return jsNumber(exec->r(virtualRegister().offset()).unboxedStrictInt52());
+    case DoubleDisplacedInJSStack:
+        return jsNumber(exec->r(virtualRegister().offset()).unboxedDouble());
+    case CellDisplacedInJSStack:
+        return exec->r(virtualRegister().offset()).unboxedCell();
+    case BooleanDisplacedInJSStack:
+#if USE(JSVALUE64)
+        return exec->r(virtualRegister().offset()).jsValue();
+#else
+        return jsBoolean(exec->r(virtualRegister().offset()).unboxedBoolean());
+#endif
+    case Constant:
+        return constant();
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return JSValue();
+    }
+}
+
+#if ENABLE(JIT)
+
+void ValueRecovery::dumpInContext(PrintStream& out, DumpContext* context) const
+{
+    switch (technique()) {
+    case InGPR:
+        out.print(gpr());
+        return;
+    case UnboxedInt32InGPR:
+        out.print("int32(", gpr(), ")");
+        return;
+    case UnboxedInt52InGPR:
+        out.print("int52(", gpr(), ")");
+        return;
+    case UnboxedStrictInt52InGPR:
+        out.print("strictInt52(", gpr(), ")");
+        return;
+    case UnboxedBooleanInGPR:
+        out.print("bool(", gpr(), ")");
+        return;
+    case UnboxedCellInGPR:
+        out.print("cell(", gpr(), ")");
+        return;
+    case InFPR:
+        out.print(fpr());
+        return;
+    case UnboxedDoubleInFPR:
+        out.print("double(", fpr(), ")");
+        return;
+#if USE(JSVALUE32_64)
+    case InPair:
+        out.print("pair(", tagGPR(), ", ", payloadGPR(), ")");
+        return;
+#endif
+    case DisplacedInJSStack:
+        out.print("*", virtualRegister());
+        return;
+    case Int32DisplacedInJSStack:
+        out.print("*int32(", virtualRegister(), ")");
+        return;
+    case Int52DisplacedInJSStack:
+        out.print("*int52(", virtualRegister(), ")");
+        return;
+    case StrictInt52DisplacedInJSStack:
+        out.print("*strictInt52(", virtualRegister(), ")");
+        return;
+    case DoubleDisplacedInJSStack:
+        out.print("*double(", virtualRegister(), ")");
+        return;
+    case CellDisplacedInJSStack:
+        out.print("*cell(", virtualRegister(), ")");
+        return;
+    case BooleanDisplacedInJSStack:
+        out.print("*bool(", virtualRegister(), ")");
+        return;
+    case DirectArgumentsThatWereNotCreated:
+        out.print("DirectArguments(", nodeID(), ")");
+        return;
+    case ClonedArgumentsThatWereNotCreated:
+        out.print("ClonedArguments(", nodeID(), ")");
+        return;
+    case Constant:
+        out.print("[", inContext(constant(), context), "]");
+        return;
+    case DontKnow:
+        out.printf("!");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void ValueRecovery::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
diff --git a/bytecode/ValueRecovery.h b/bytecode/ValueRecovery.h
new file mode 100644
index 0000000..c98fd20
--- /dev/null
+++ b/bytecode/ValueRecovery.h
@@ -0,0 +1,426 @@
+/*
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "DFGMinifiedID.h"
+#include "DataFormat.h"
+#if ENABLE(JIT)
+#include "GPRInfo.h"
+#include "FPRInfo.h"
+#include "Reg.h"
+#endif
+#include "JSCJSValue.h"
+#include "MacroAssembler.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+struct DumpContext;
+struct InlineCallFrame;
+
+// Describes how to recover a given bytecode virtual register at a given
+// code point.
+enum ValueRecoveryTechnique {
+    // It's in a register.
+    InGPR,
+    UnboxedInt32InGPR,
+    UnboxedInt52InGPR,
+    UnboxedStrictInt52InGPR,
+    UnboxedBooleanInGPR,
+    UnboxedCellInGPR,
+#if USE(JSVALUE32_64)
+    InPair,
+#endif
+    InFPR,
+    UnboxedDoubleInFPR,
+    // It's in the stack, but at a different location.
+    DisplacedInJSStack,
+    // It's in the stack, at a different location, and it's unboxed.
+    Int32DisplacedInJSStack,
+    Int52DisplacedInJSStack,
+    StrictInt52DisplacedInJSStack,
+    DoubleDisplacedInJSStack,
+    CellDisplacedInJSStack,
+    BooleanDisplacedInJSStack,
+    // It's an Arguments object that was never actually created. This arises from the arguments simplification done by the DFG.
+    DirectArgumentsThatWereNotCreated,
+    ClonedArgumentsThatWereNotCreated,
+    // It's a constant.
+    Constant,
+    // Don't know how to recover it.
+    DontKnow
+};
+
+class ValueRecovery {
+public:
+    ValueRecovery()
+        : m_technique(DontKnow)
+    {
+    }
+    
+    bool isSet() const { return m_technique != DontKnow; }
+    bool operator!() const { return !isSet(); }
+
+#if ENABLE(JIT)
+    static ValueRecovery inRegister(Reg reg, DataFormat dataFormat)
+    {
+        if (reg.isGPR())
+            return inGPR(reg.gpr(), dataFormat);
+
+        ASSERT(reg.isFPR());
+        return inFPR(reg.fpr(), dataFormat);
+    }
+#endif
+
+    explicit operator bool() const { return isSet(); }
+    
+    static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat)
+    {
+        ASSERT(dataFormat != DataFormatNone);
+#if USE(JSVALUE32_64)
+        ASSERT(dataFormat == DataFormatInt32 || dataFormat == DataFormatCell || dataFormat == DataFormatBoolean);
+#endif
+        ValueRecovery result;
+        if (dataFormat == DataFormatInt32)
+            result.m_technique = UnboxedInt32InGPR;
+        else if (dataFormat == DataFormatInt52)
+            result.m_technique = UnboxedInt52InGPR;
+        else if (dataFormat == DataFormatStrictInt52)
+            result.m_technique = UnboxedStrictInt52InGPR;
+        else if (dataFormat == DataFormatBoolean)
+            result.m_technique = UnboxedBooleanInGPR;
+        else if (dataFormat == DataFormatCell)
+            result.m_technique = UnboxedCellInGPR;
+        else
+            result.m_technique = InGPR;
+        result.m_source.gpr = gpr;
+        return result;
+    }
+    
+#if USE(JSVALUE32_64)
+    static ValueRecovery inPair(MacroAssembler::RegisterID tagGPR, MacroAssembler::RegisterID payloadGPR)
+    {
+        ValueRecovery result;
+        result.m_technique = InPair;
+        result.m_source.pair.tagGPR = tagGPR;
+        result.m_source.pair.payloadGPR = payloadGPR;
+        return result;
+    }
+#endif
+
+    static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr, DataFormat dataFormat)
+    {
+        ASSERT(dataFormat == DataFormatDouble || dataFormat & DataFormatJS);
+        ValueRecovery result;
+        if (dataFormat == DataFormatDouble)
+            result.m_technique = UnboxedDoubleInFPR;
+        else
+            result.m_technique = InFPR;
+        result.m_source.fpr = fpr;
+        return result;
+    }
+    
+    static ValueRecovery displacedInJSStack(VirtualRegister virtualReg, DataFormat dataFormat)
+    {
+        ValueRecovery result;
+        switch (dataFormat) {
+        case DataFormatInt32:
+            result.m_technique = Int32DisplacedInJSStack;
+            break;
+            
+        case DataFormatInt52:
+            result.m_technique = Int52DisplacedInJSStack;
+            break;
+            
+        case DataFormatStrictInt52:
+            result.m_technique = StrictInt52DisplacedInJSStack;
+            break;
+            
+        case DataFormatDouble:
+            result.m_technique = DoubleDisplacedInJSStack;
+            break;
+
+        case DataFormatCell:
+            result.m_technique = CellDisplacedInJSStack;
+            break;
+            
+        case DataFormatBoolean:
+            result.m_technique = BooleanDisplacedInJSStack;
+            break;
+            
+        default:
+            ASSERT(dataFormat != DataFormatNone && dataFormat != DataFormatStorage);
+            result.m_technique = DisplacedInJSStack;
+            break;
+        }
+        result.m_source.virtualReg = virtualReg.offset();
+        return result;
+    }
+    
+    static ValueRecovery constant(JSValue value)
+    {
+        ValueRecovery result;
+        result.m_technique = Constant;
+        result.m_source.constant = JSValue::encode(value);
+        return result;
+    }
+    
+    static ValueRecovery directArgumentsThatWereNotCreated(DFG::MinifiedID id)
+    {
+        ValueRecovery result;
+        result.m_technique = DirectArgumentsThatWereNotCreated;
+        result.m_source.nodeID = id.bits();
+        return result;
+    }
+    
+    static ValueRecovery clonedArgumentsThatWereNotCreated(DFG::MinifiedID id)
+    {
+        ValueRecovery result;
+        result.m_technique = ClonedArgumentsThatWereNotCreated;
+        result.m_source.nodeID = id.bits();
+        return result;
+    }
+
+    ValueRecoveryTechnique technique() const { return m_technique; }
+    
+    bool isConstant() const { return m_technique == Constant; }
+
+    bool isInGPR() const
+    {
+        switch (m_technique) {
+        case InGPR:
+        case UnboxedInt32InGPR:
+        case UnboxedBooleanInGPR:
+        case UnboxedCellInGPR:
+        case UnboxedInt52InGPR:
+        case UnboxedStrictInt52InGPR:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isInFPR() const
+    {
+        switch (m_technique) {
+        case InFPR:
+        case UnboxedDoubleInFPR:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    bool isInRegisters() const
+    {
+        return isInJSValueRegs() || isInGPR() || isInFPR();
+    }
+
+    bool isInJSStack() const
+    {
+        switch (m_technique) {
+        case DisplacedInJSStack:
+        case Int32DisplacedInJSStack:
+        case Int52DisplacedInJSStack:
+        case StrictInt52DisplacedInJSStack:
+        case DoubleDisplacedInJSStack:
+        case CellDisplacedInJSStack:
+        case BooleanDisplacedInJSStack:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    DataFormat dataFormat() const
+    {
+        switch (m_technique) {
+        case InGPR:
+        case InFPR:
+        case DisplacedInJSStack:
+        case Constant:
+#if USE(JSVALUE32_64)
+        case InPair:
+#endif
+            return DataFormatJS;
+        case UnboxedInt32InGPR:
+        case Int32DisplacedInJSStack:
+            return DataFormatInt32;
+        case UnboxedInt52InGPR:
+        case Int52DisplacedInJSStack:
+            return DataFormatInt52;
+        case UnboxedStrictInt52InGPR:
+        case StrictInt52DisplacedInJSStack:
+            return DataFormatStrictInt52;
+        case UnboxedBooleanInGPR:
+        case BooleanDisplacedInJSStack:
+            return DataFormatBoolean;
+        case UnboxedCellInGPR:
+        case CellDisplacedInJSStack:
+            return DataFormatCell;
+        case UnboxedDoubleInFPR:
+        case DoubleDisplacedInJSStack:
+            return DataFormatDouble;
+        default:
+            return DataFormatNone;
+        }
+    }
+    
+    MacroAssembler::RegisterID gpr() const
+    {
+        ASSERT(isInGPR());
+        return m_source.gpr;
+    }
+    
+#if USE(JSVALUE32_64)
+    MacroAssembler::RegisterID tagGPR() const
+    {
+        ASSERT(m_technique == InPair);
+        return m_source.pair.tagGPR;
+    }
+    
+    MacroAssembler::RegisterID payloadGPR() const
+    {
+        ASSERT(m_technique == InPair);
+        return m_source.pair.payloadGPR;
+    }
+
+    bool isInJSValueRegs() const
+    {
+        return m_technique == InPair;
+    }
+
+#if ENABLE(JIT)
+    JSValueRegs jsValueRegs() const
+    {
+        ASSERT(isInJSValueRegs());
+        return JSValueRegs(tagGPR(), payloadGPR());
+    }
+#endif // ENABLE(JIT)
+#else
+    bool isInJSValueRegs() const
+    {
+        return isInGPR();
+    }
+#endif // USE(JSVALUE32_64)
+    
+    MacroAssembler::FPRegisterID fpr() const
+    {
+        ASSERT(isInFPR());
+        return m_source.fpr;
+    }
+    
+    VirtualRegister virtualRegister() const
+    {
+        ASSERT(isInJSStack());
+        return VirtualRegister(m_source.virtualReg);
+    }
+    
+    ValueRecovery withLocalsOffset(int offset) const
+    {
+        switch (m_technique) {
+        case DisplacedInJSStack:
+        case Int32DisplacedInJSStack:
+        case DoubleDisplacedInJSStack:
+        case CellDisplacedInJSStack:
+        case BooleanDisplacedInJSStack:
+        case Int52DisplacedInJSStack:
+        case StrictInt52DisplacedInJSStack: {
+            ValueRecovery result;
+            result.m_technique = m_technique;
+            result.m_source.virtualReg = m_source.virtualReg + offset;
+            return result;
+        }
+            
+        default:
+            return *this;
+        }
+    }
+    
+    JSValue constant() const
+    {
+        ASSERT(isConstant());
+        return JSValue::decode(m_source.constant);
+    }
+    
+    DFG::MinifiedID nodeID() const
+    {
+        ASSERT(m_technique == DirectArgumentsThatWereNotCreated || m_technique == ClonedArgumentsThatWereNotCreated);
+        return DFG::MinifiedID::fromBits(m_source.nodeID);
+    }
+    
+    JSValue recover(ExecState*) const;
+    
+#if ENABLE(JIT)
+    template<typename Func>
+    void forEachReg(const Func& func)
+    {
+        switch (m_technique) {
+        case InGPR:
+        case UnboxedInt32InGPR:
+        case UnboxedBooleanInGPR:
+        case UnboxedCellInGPR:
+        case UnboxedInt52InGPR:
+        case UnboxedStrictInt52InGPR:
+            func(gpr());
+            return;
+        case InFPR:
+        case UnboxedDoubleInFPR:
+            func(fpr());
+            return;
+#if USE(JSVALUE32_64)
+        case InPair:
+            func(jsValueRegs().payloadGPR());
+            func(jsValueRegs().tagGPR());
+            return;
+#endif
+        default:
+            return;
+        }
+    }
+    
+    void dumpInContext(PrintStream& out, DumpContext* context) const;
+    void dump(PrintStream& out) const;
+#endif
+
+private:
+    ValueRecoveryTechnique m_technique;
+    union {
+        MacroAssembler::RegisterID gpr;
+        MacroAssembler::FPRegisterID fpr;
+#if USE(JSVALUE32_64)
+        struct {
+            MacroAssembler::RegisterID tagGPR;
+            MacroAssembler::RegisterID payloadGPR;
+        } pair;
+#endif
+        int virtualReg;
+        EncodedJSValue constant;
+        uintptr_t nodeID;
+    } m_source;
+};
+
+} // namespace JSC
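
ValueRecovery is a small tagged union: a technique enum plus a union holding a register, a stack offset, a constant, or a node ID. As a sketch (recoverIfConstant() is hypothetical; the factory and accessors are the ones declared above), a constant recovery can be resolved without an ExecState:

    #include "ValueRecovery.h"

    namespace JSC {

    // Resolve a recovery that does not depend on machine state; anything else
    // needs an ExecState and goes through ValueRecovery::recover(ExecState*).
    static JSValue recoverIfConstant(const ValueRecovery& recovery)
    {
        if (recovery.isConstant())
            return recovery.constant();
        return JSValue();
    }

    } // namespace JSC

    // Typical construction, e.g. when the compiler knows the value statically:
    //     ValueRecovery recovery = ValueRecovery::constant(jsNumber(42));
    //     ASSERT(recovery.dataFormat() == DataFormatJS);
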
diff --git a/bytecode/VariableWriteFireDetail.cpp b/bytecode/VariableWriteFireDetail.cpp
new file mode 100644
index 0000000..ec61984
--- /dev/null
+++ b/bytecode/VariableWriteFireDetail.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "VariableWriteFireDetail.h"
+
+#include "JSCInlines.h"
+
+namespace JSC {
+
+void VariableWriteFireDetail::dump(PrintStream& out) const
+{
+    out.print("Write to ", m_name, " in ", JSValue(m_object));
+}
+
+void VariableWriteFireDetail::touch(VM& vm, WatchpointSet* set, JSObject* object, const PropertyName& name)
+{
+    set->touch(vm, VariableWriteFireDetail(object, name));
+}
+
+} // namespace JSC
+
diff --git a/bytecode/VariableWriteFireDetail.h b/bytecode/VariableWriteFireDetail.h
new file mode 100644
index 0000000..42ffb1b
--- /dev/null
+++ b/bytecode/VariableWriteFireDetail.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "Watchpoint.h"
+
+namespace JSC {
+
+class JSObject;
+class PropertyName;
+
+class VariableWriteFireDetail : public FireDetail {
+public:
+    VariableWriteFireDetail(JSObject* object, const PropertyName& name)
+        : m_object(object)
+        , m_name(name)
+    {
+    }
+    
+    JS_EXPORT_PRIVATE void dump(PrintStream&) const override;
+    
+    JS_EXPORT_PRIVATE static void touch(VM&, WatchpointSet*, JSObject*, const PropertyName&);
+
+private:
+    JSObject* m_object;
+    const PropertyName& m_name;
+};
+
+} // namespace JSC
diff --git a/bytecode/VirtualRegister.cpp b/bytecode/VirtualRegister.cpp
new file mode 100644
index 0000000..57cdb62
--- /dev/null
+++ b/bytecode/VirtualRegister.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+void VirtualRegister::dump(PrintStream& out) const
+{
+    if (!isValid()) {
+        out.print("");
+        return;
+    }
+    
+    if (isHeader()) {
+        out.print("head", m_virtualRegister);
+        return;
+    }
+    
+    if (isConstant()) {
+        out.print("const", toConstantIndex());
+        return;
+    }
+    
+    if (isArgument()) {
+        if (!toArgument())
+            out.print("this");
+        else
+            out.print("arg", toArgument());
+        return;
+    }
+    
+    if (isLocal()) {
+        out.print("loc", toLocal());
+        return;
+    }
+    
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
diff --git a/bytecode/VirtualRegister.h b/bytecode/VirtualRegister.h
new file mode 100644
index 0000000..f32e8d2
--- /dev/null
+++ b/bytecode/VirtualRegister.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011, 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include "BytecodeConventions.h"
+#include "CallFrame.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+inline bool operandIsLocal(int operand)
+{
+    return operand < 0;
+}
+
+inline bool operandIsArgument(int operand)
+{
+    return operand >= 0;
+}
+
+
+class VirtualRegister {
+public:
+    friend VirtualRegister virtualRegisterForLocal(int);
+    friend VirtualRegister virtualRegisterForArgument(int, int);
+
+    VirtualRegister()
+        : m_virtualRegister(s_invalidVirtualRegister)
+    { }
+
+    explicit VirtualRegister(int virtualRegister)
+        : m_virtualRegister(virtualRegister)
+    { }
+
+    bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); }
+    bool isLocal() const { return operandIsLocal(m_virtualRegister); }
+    bool isArgument() const { return operandIsArgument(m_virtualRegister); }
+    bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < CallFrameSlot::thisArgument; }
+    bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; }
+    int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); }
+    int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); }
+    int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; }
+    int offset() const { return m_virtualRegister; }
+    int offsetInBytes() const { return m_virtualRegister * sizeof(Register); }
+
+    bool operator==(VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; }
+    bool operator!=(VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; }
+    bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; }
+    bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; }
+    bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; }
+    bool operator>=(VirtualRegister other) const { return m_virtualRegister >= other.m_virtualRegister; }
+    
+    VirtualRegister operator+(int value) const
+    {
+        return VirtualRegister(offset() + value);
+    }
+    VirtualRegister operator-(int value) const
+    {
+        return VirtualRegister(offset() - value);
+    }
+    VirtualRegister operator+(VirtualRegister value) const
+    {
+        return VirtualRegister(offset() + value.offset());
+    }
+    VirtualRegister operator-(VirtualRegister value) const
+    {
+        return VirtualRegister(offset() - value.offset());
+    }
+    VirtualRegister& operator+=(int value)
+    {
+        return *this = *this + value;
+    }
+    VirtualRegister& operator-=(int value)
+    {
+        return *this = *this - value;
+    }
+    
+    void dump(PrintStream& out) const;
+
+private:
+    static const int s_invalidVirtualRegister = 0x3fffffff;
+    static const int s_firstConstantRegisterIndex = FirstConstantRegisterIndex;
+
+    static int localToOperand(int local) { return -1 - local; }
+    static int operandToLocal(int operand) { return -1 - operand; }
+    static int operandToArgument(int operand) { return operand - CallFrame::thisArgumentOffset(); }
+    static int argumentToOperand(int argument) { return argument + CallFrame::thisArgumentOffset(); }
+
+    int m_virtualRegister;
+};
+
+COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit);
+
+inline VirtualRegister virtualRegisterForLocal(int local)
+{
+    return VirtualRegister(VirtualRegister::localToOperand(local));
+}
+
+inline VirtualRegister virtualRegisterForArgument(int argument, int offset = 0)
+{
+    return VirtualRegister(VirtualRegister::argumentToOperand(argument) + offset);
+}
+
+} // namespace JSC
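
VirtualRegister encodes locals as negative operands (loc0 is -1, loc1 is -2, and so on) and arguments as non-negative operands offset past the call frame header, with argument 0 being the 'this' value. A small sketch using only the helpers declared above (illustrateOperandEncoding() itself is hypothetical):

    #include "VirtualRegister.h"

    namespace JSC {

    static void illustrateOperandEncoding()
    {
        // Locals live at negative operands: loc0 is offset -1, loc1 is -2, ...
        VirtualRegister local0 = virtualRegisterForLocal(0);
        ASSERT(local0.isLocal());
        ASSERT(local0.offset() == -1);
        ASSERT(local0.toLocal() == 0);

        // Arguments live at non-negative operands past the call frame header;
        // argument 0 is the 'this' value, so dump() prints it as "this".
        VirtualRegister thisValue = virtualRegisterForArgument(0);
        ASSERT(thisValue.isArgument());
        ASSERT(thisValue.toArgument() == 0);
    }

    } // namespace JSC
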
diff --git a/bytecode/Watchpoint.cpp b/bytecode/Watchpoint.cpp
new file mode 100644
index 0000000..fbe952d
--- /dev/null
+++ b/bytecode/Watchpoint.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "Watchpoint.h"
+
+#include "HeapInlines.h"
+#include "VM.h"
+#include <wtf/CompilationThread.h>
+
+namespace JSC {
+
+void StringFireDetail::dump(PrintStream& out) const
+{
+    out.print(m_string);
+}
+
+Watchpoint::~Watchpoint()
+{
+    if (isOnList()) {
+        // This will happen if we get destroyed before the set fires. That's totally a valid
+        // possibility. For example:
+        //
+        // CodeBlock has a Watchpoint on transition from structure S1. The transition never
+        // happens, but the CodeBlock gets destroyed because of GC.
+        remove();
+    }
+}
+
+void Watchpoint::fire(const FireDetail& detail)
+{
+    RELEASE_ASSERT(!isOnList());
+    fireInternal(detail);
+}
+
+WatchpointSet::WatchpointSet(WatchpointState state)
+    : m_state(state)
+    , m_setIsNotEmpty(false)
+{
+}
+
+WatchpointSet::~WatchpointSet()
+{
+    // Remove all watchpoints, so that they don't try to remove themselves. Note that we
+    // don't fire watchpoints on deletion. We assume that any code that is interested in
+    // watchpoints already also separately has a mechanism to make sure that the code is
+    // either keeping the watchpoint set's owner alive, or does some weak reference thing.
+    while (!m_set.isEmpty())
+        m_set.begin()->remove();
+}
+
+void WatchpointSet::add(Watchpoint* watchpoint)
+{
+    ASSERT(!isCompilationThread());
+    ASSERT(state() != IsInvalidated);
+    if (!watchpoint)
+        return;
+    m_set.push(watchpoint);
+    m_setIsNotEmpty = true;
+    m_state = IsWatched;
+}
+
+void WatchpointSet::fireAllSlow(VM& vm, const FireDetail& detail)
+{
+    ASSERT(state() == IsWatched);
+    
+    WTF::storeStoreFence();
+    m_state = IsInvalidated; // Do this first. Needed for adaptive watchpoints.
+    fireAllWatchpoints(vm, detail);
+    WTF::storeStoreFence();
+}
+
+void WatchpointSet::fireAllSlow(VM& vm, const char* reason)
+{
+    fireAllSlow(vm, StringFireDetail(reason));
+}
+
+void WatchpointSet::fireAllWatchpoints(VM& vm, const FireDetail& detail)
+{
+    // In case there are any adaptive watchpoints, we need to make sure that they see that this
+    // watchpoint has been already invalidated.
+    RELEASE_ASSERT(hasBeenInvalidated());
+
+    // Firing a watchpoint may cause a GC to happen. This GC could destroy various
+    // Watchpoints themselves while they're in the process of firing. It's not safe
+    // for most Watchpoints to be destructed while they're in the middle of firing.
+    // This GC could also destroy us, and we're not in a safe state to be destroyed.
+    // The safest thing to do is to DeferGCForAWhile to prevent this GC from happening.
+    DeferGCForAWhile deferGC(vm.heap);
+    
+    while (!m_set.isEmpty()) {
+        Watchpoint* watchpoint = m_set.begin();
+        ASSERT(watchpoint->isOnList());
+        
+        // Removing the Watchpoint before firing it makes it possible to implement watchpoints
+        // that add themselves to a different set when they fire. This kind of "adaptive"
+        // watchpoint can be used to track some semantic property that is more fine-grained than
+        // what the set can convey. For example, we might care if a singleton object ever has a
+        // property called "foo". We can watch for this by checking if its Structure has "foo" and
+        // then watching its transitions. But then the watchpoint fires if any property is added.
+        // So, before the watchpoint decides to invalidate any code, it can check if it is
+        // possible to add itself to the transition watchpoint set of the singleton object's new
+        // Structure.
+        watchpoint->remove();
+        ASSERT(m_set.begin() != watchpoint);
+        ASSERT(!watchpoint->isOnList());
+        
+        watchpoint->fire(detail);
+        // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's
+        // fine, because we have no use for the pointer anymore.
+    }
+}
+
+void InlineWatchpointSet::add(Watchpoint* watchpoint)
+{
+    inflate()->add(watchpoint);
+}
+
+void InlineWatchpointSet::fireAll(VM& vm, const char* reason)
+{
+    fireAll(vm, StringFireDetail(reason));
+}
+
+WatchpointSet* InlineWatchpointSet::inflateSlow()
+{
+    ASSERT(isThin());
+    ASSERT(!isCompilationThread());
+    WatchpointSet* fat = adoptRef(new WatchpointSet(decodeState(m_data))).leakRef();
+    WTF::storeStoreFence();
+    m_data = bitwise_cast<uintptr_t>(fat);
+    return fat;
+}
+
+void InlineWatchpointSet::freeFat()
+{
+    ASSERT(isFat());
+    fat()->deref();
+}
+
+} // namespace JSC
+
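
Client code uses these classes by subclassing Watchpoint and registering instances on a WatchpointSet. A hedged sketch follows (LoggingWatchpoint and watchInvariant() are hypothetical; dataLog() is assumed to be available from WTF; the set/watchpoint API is the one defined in this patch):

    #include "Watchpoint.h"
    #include <wtf/DataLog.h>

    namespace JSC {

    class LoggingWatchpoint : public Watchpoint {
    protected:
        void fireInternal(const FireDetail& detail) override
        {
            // A real watchpoint would invalidate the code or cached state it guards.
            dataLog("Watchpoint fired: ", detail, "\n");
        }
    };

    static void watchInvariant(VM& vm, WatchpointSet& set, LoggingWatchpoint& watchpoint)
    {
        if (set.state() == ClearWatchpoint)
            set.startWatching();  // begin validating even before any watchpoint is added
        set.add(&watchpoint);     // add() ignores null and must not be called after invalidation

        // Later, when the guarded invariant is broken:
        set.fireAll(vm, "the guarded invariant was broken");
    }

    } // namespace JSC
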
diff --git a/bytecode/Watchpoint.h b/bytecode/Watchpoint.h
new file mode 100644
index 0000000..69e393d
--- /dev/null
+++ b/bytecode/Watchpoint.h
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2012-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#pragma once
+
+#include <wtf/Atomics.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+#include <wtf/SentinelLinkedList.h>
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace JSC {
+
+class FireDetail {
+    void* operator new(size_t) = delete;
+    
+public:
+    FireDetail()
+    {
+    }
+    
+    virtual ~FireDetail()
+    {
+    }
+    
+    virtual void dump(PrintStream&) const = 0;
+};
+
+class StringFireDetail : public FireDetail {
+public:
+    StringFireDetail(const char* string)
+        : m_string(string)
+    {
+    }
+    
+    void dump(PrintStream& out) const override;
+
+private:
+    const char* m_string;
+};
+
+class WatchpointSet;
+
+class Watchpoint : public BasicRawSentinelNode<Watchpoint> {
+    WTF_MAKE_NONCOPYABLE(Watchpoint);
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    Watchpoint()
+    {
+    }
+    
+    virtual ~Watchpoint();
+
+protected:
+    virtual void fireInternal(const FireDetail&) = 0;
+
+private:
+    friend class WatchpointSet;
+    void fire(const FireDetail&);
+};
+
+enum WatchpointState {
+    ClearWatchpoint,
+    IsWatched,
+    IsInvalidated
+};
+
+class InlineWatchpointSet;
+class VM;
+
+class WatchpointSet : public ThreadSafeRefCounted<WatchpointSet> {
+    friend class LLIntOffsetsExtractor;
+public:
+    JS_EXPORT_PRIVATE WatchpointSet(WatchpointState);
+    
+    // FIXME: In many cases, it would be amazing if this *did* fire the watchpoints. I suspect that
+    // this might be hard to get right, but still, it might be awesome.
+    JS_EXPORT_PRIVATE ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+    
+    // Fast way of getting the state, which only works from the main thread.
+    WatchpointState stateOnJSThread() const
+    {
+        return static_cast<WatchpointState>(m_state);
+    }
+    
+    // It is safe to call this from another thread. It may return an old
+    // state. Guarantees that if you *first* read the state() of the thing being
+    // watched and it returned IsWatched, and *second* you actually read its
+    // value, then it's safe to assume that if the state being watched changes,
+    // the watchpoint state() will also change to IsInvalidated.
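+    //
+    // A sketch of that pattern as seen from a concurrent (e.g. compiler) thread; `set` is this
+    // WatchpointSet and `thing` is a hypothetical object whose value it guards:
+    //
+    //     if (set->state() == IsWatched) {        // first: observe that the set is watched
+    //         auto value = thing->currentValue(); // second: read the guarded value
+    //         // If `thing` changes later, set->state() is guaranteed to become IsInvalidated,
+    //         // so re-checking the state can detect that `value` went stale.
+    //     }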
+    WatchpointState state() const
+    {
+        WTF::loadLoadFence();
+        WatchpointState result = static_cast<WatchpointState>(m_state);
+        WTF::loadLoadFence();
+        return result;
+    }
+    
+    // It is safe to call this from another thread.  It may return true
+    // even if the set actually had been invalidated, but that ought to happen
+    // only in the case of races, and should be rare. Guarantees that if you
+    // call this after observing something that must imply that the set is
+    // invalidated, then you will see this return false. This is ensured by
+    // issuing a load-load fence prior to querying the state.
+    bool isStillValid() const
+    {
+        return state() != IsInvalidated;
+    }
+    // Like isStillValid(), may be called from another thread.
+    bool hasBeenInvalidated() const { return !isStillValid(); }
+    
+    // As a convenience, this will ignore 0. That's because code paths in the DFG
+    // that create speculation watchpoints may choose to bail out if speculation
+    // had already been terminated.
+    void add(Watchpoint*);
+    
+    // Force the watchpoint set to behave as if it was being watched even if no
+    // watchpoints have been installed. This will result in invalidation if the
+    // watchpoint would have fired. That's a pretty good indication that you
+    // probably don't want to set watchpoints, since we typically don't want to
+    // set watchpoints that we believe will actually be fired.
+    void startWatching()
+    {
+        ASSERT(m_state != IsInvalidated);
+        if (m_state == IsWatched)
+            return;
+        WTF::storeStoreFence();
+        m_state = IsWatched;
+        WTF::storeStoreFence();
+    }
+    
+    void fireAll(VM& vm, const FireDetail& detail)
+    {
+        if (LIKELY(m_state != IsWatched))
+            return;
+        fireAllSlow(vm, detail);
+    }
+    
+    void fireAll(VM& vm, const char* reason)
+    {
+        if (LIKELY(m_state != IsWatched))
+            return;
+        fireAllSlow(vm, reason);
+    }
+    
+    void touch(VM& vm, const FireDetail& detail)
+    {
+        if (state() == ClearWatchpoint)
+            startWatching();
+        else
+            fireAll(vm, detail);
+    }
+    
+    void touch(VM& vm, const char* reason)
+    {
+        touch(vm, StringFireDetail(reason));
+    }
+    
+    void invalidate(VM& vm, const FireDetail& detail)
+    {
+        if (state() == IsWatched)
+            fireAll(vm, detail);
+        m_state = IsInvalidated;
+    }
+    
+    void invalidate(VM& vm, const char* reason)
+    {
+        invalidate(vm, StringFireDetail(reason));
+    }
+    
+    bool isBeingWatched() const
+    {
+        return m_setIsNotEmpty;
+    }
+    
+    int8_t* addressOfState() { return &m_state; }
+    static ptrdiff_t offsetOfState() { return OBJECT_OFFSETOF(WatchpointSet, m_state); }
+    int8_t* addressOfSetIsNotEmpty() { return &m_setIsNotEmpty; }
+    
+    JS_EXPORT_PRIVATE void fireAllSlow(VM&, const FireDetail&); // Call only if you've checked isWatched.
+    JS_EXPORT_PRIVATE void fireAllSlow(VM&, const char* reason); // Ditto.
+    
+private:
+    void fireAllWatchpoints(VM&, const FireDetail&);
+    
+    friend class InlineWatchpointSet;
+
+    int8_t m_state;
+    int8_t m_setIsNotEmpty;
+
+    SentinelLinkedList<Watchpoint, BasicRawSentinelNode<Watchpoint>> m_set;
+};
+
+// InlineWatchpointSet is a low-overhead, non-copyable watchpoint set in which
+// it is not possible to quickly query whether it is being watched in a single
+// branch. There is a fairly simple tradeoff between WatchpointSet and
+// InlineWatchpointSet:
+//
+// Do you have to emit JIT code that rapidly tests whether the watchpoint set
+// is being watched?  If so, use WatchpointSet.
+//
+// Do you need multiple parties to have pointers to the same WatchpointSet?
+// If so, use WatchpointSet.
+//
+// Do you have to allocate a lot of watchpoint sets?  If so, use
+// InlineWatchpointSet unless you answered "yes" to the previous questions.
+//
+// InlineWatchpointSet will use just one pointer-width word of memory unless
+// you actually add watchpoints to it, in which case it internally inflates
+// to a pointer to a WatchpointSet, and transfers its state to the
+// WatchpointSet.
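+//
+// A rough usage sketch (the owning class Thing and the helper functions below are hypothetical):
+//
+//     class Thing {
+//     public:
+//         InlineWatchpointSet m_watchpoints { ClearWatchpoint }; // a single word until a watchpoint is added
+//     };
+//
+//     void watchThing(Thing& thing, Watchpoint* watchpoint)
+//     {
+//         thing.m_watchpoints.add(watchpoint); // inflates the thin word into a real WatchpointSet
+//     }
+//
+//     void mutateThing(VM& vm, Thing& thing)
+//     {
+//         thing.m_watchpoints.fireAll(vm, "thing mutated"); // invalidates the set and fires any watchpoints
+//     }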
+
+class InlineWatchpointSet {
+    WTF_MAKE_NONCOPYABLE(InlineWatchpointSet);
+public:
+    InlineWatchpointSet(WatchpointState state)
+        : m_data(encodeState(state))
+    {
+    }
+    
+    ~InlineWatchpointSet()
+    {
+        if (isThin())
+            return;
+        freeFat();
+    }
+    
+    // Fast way of getting the state, which only works from the main thread.
+    WatchpointState stateOnJSThread() const
+    {
+        uintptr_t data = m_data;
+        if (isFat(data))
+            return fat(data)->stateOnJSThread();
+        return decodeState(data);
+    }
+
+    // It is safe to call this from another thread. It may return a prior state,
+    // but that should be fine since you should only perform actions based on the
+    // state if you also add a watchpoint.
+    WatchpointState state() const
+    {
+        WTF::loadLoadFence();
+        uintptr_t data = m_data;
+        WTF::loadLoadFence();
+        if (isFat(data))
+            return fat(data)->state();
+        return decodeState(data);
+    }
+    
+    // It is safe to call this from another thread.  It may return false
+    // even if the set actually had been invalidated, but that ought to happen
+    // only in the case of races, and should be rare.
+    bool hasBeenInvalidated() const
+    {
+        return state() == IsInvalidated;
+    }
+    
+    // Like hasBeenInvalidated(), may be called from another thread.
+    bool isStillValid() const
+    {
+        return !hasBeenInvalidated();
+    }
+    
+    void add(Watchpoint*);
+    
+    void startWatching()
+    {
+        if (isFat()) {
+            fat()->startWatching();
+            return;
+        }
+        ASSERT(decodeState(m_data) != IsInvalidated);
+        m_data = encodeState(IsWatched);
+    }
+    
+    void fireAll(VM& vm, const FireDetail& detail)
+    {
+        if (isFat()) {
+            fat()->fireAll(vm, detail);
+            return;
+        }
+        if (decodeState(m_data) == ClearWatchpoint)
+            return;
+        m_data = encodeState(IsInvalidated);
+        WTF::storeStoreFence();
+    }
+    
+    void invalidate(VM& vm, const FireDetail& detail)
+    {
+        if (isFat())
+            fat()->invalidate(vm, detail);
+        else
+            m_data = encodeState(IsInvalidated);
+    }
+    
+    JS_EXPORT_PRIVATE void fireAll(VM&, const char* reason);
+    
+    void touch(VM& vm, const FireDetail& detail)
+    {
+        if (isFat()) {
+            fat()->touch(vm, detail);
+            return;
+        }
+        uintptr_t data = m_data;
+        if (decodeState(data) == IsInvalidated)
+            return;
+        WTF::storeStoreFence();
+        if (decodeState(data) == ClearWatchpoint)
+            m_data = encodeState(IsWatched);
+        else
+            m_data = encodeState(IsInvalidated);
+        WTF::storeStoreFence();
+    }
+    
+    void touch(VM& vm, const char* reason)
+    {
+        touch(vm, StringFireDetail(reason));
+    }
+
+    // Note that for any watchpoint that is visible from the DFG, it would be incorrect to write code like:
+    //
+    // if (w.isBeingWatched())
+    //     w.fireAll()
+    //
+    // Concurrently to this, the DFG could do:
+    //
+    // if (w.isStillValid())
+    //     perform optimizations;
+    // if (!w.isStillValid())
+    //     retry compilation;
+    //
+    // Note that the DFG algorithm is widespread, and sound, because fireAll() and invalidate() will leave
+    // the watchpoint in a !isStillValid() state. Hence, if fireAll() or invalidate() interleaved between
+    // the first isStillValid() check and the second one, then it would simply cause the DFG to retry
+    // compilation later.
+    //
+    // But, if you change some piece of state that the DFG might optimize for, but invalidate the
+    // watchpoint by doing:
+    //
+    // if (w.isBeingWatched())
+    //     w.fireAll()
+    //
+    // then the DFG would never know that you invalidated state between the two checks.
+    //
+    // There are two ways to work around this:
+    //
+    // - Call fireAll() without a isBeingWatched() check. Then, the DFG will know that the watchpoint has
+    //   been invalidated when it does its second check.
+    //
+    // - Do not expose the watchpoint set to the DFG directly, and have your own way of validating whether
+    //   the assumptions that the DFG thread used are still valid when the DFG code is installed.
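+    //
+    //   For example (sketch), the first work-around is simply an unconditional:
+    //
+    //       w.fireAll(vm, "state changed");
+    //
+    //   Even if no Watchpoint is registered, an IsWatched set still transitions to IsInvalidated,
+    //   which is exactly what the concurrent DFG check needs to observe.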
+    bool isBeingWatched() const
+    {
+        if (isFat())
+            return fat()->isBeingWatched();
+        return false;
+    }
+    
+private:
+    static const uintptr_t IsThinFlag        = 1;
+    static const uintptr_t StateMask         = 6;
+    static const uintptr_t StateShift        = 1;
+    
+    static bool isThin(uintptr_t data) { return data & IsThinFlag; }
+    static bool isFat(uintptr_t data) { return !isThin(data); }
+    
+    static WatchpointState decodeState(uintptr_t data)
+    {
+        ASSERT(isThin(data));
+        return static_cast<WatchpointState>((data & StateMask) >> StateShift);
+    }
+    
+    static uintptr_t encodeState(WatchpointState state)
+    {
+        return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag;
+    }
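+    
+    // Worked example of the thin encoding (using the enum values above: ClearWatchpoint = 0,
+    // IsWatched = 1, IsInvalidated = 2): encodeState(IsWatched) == (1 << StateShift) | IsThinFlag == 0b011,
+    // so bit 0 (IsThinFlag) marks the word as thin and bits 1-2 (StateMask) hold the state, and
+    // decodeState(0b011) == (0b011 & 0b110) >> 1 == IsWatched. A fat set instead stores a plain
+    // WatchpointSet*, whose low bit is zero because the pointer is at least word-aligned.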
+    
+    bool isThin() const { return isThin(m_data); }
+    bool isFat() const { return isFat(m_data); }
+    
+    static WatchpointSet* fat(uintptr_t data)
+    {
+        return bitwise_cast<WatchpointSet*>(data);
+    }
+    
+    WatchpointSet* fat()
+    {
+        ASSERT(isFat());
+        return fat(m_data);
+    }
+    
+    const WatchpointSet* fat() const
+    {
+        ASSERT(isFat());
+        return fat(m_data);
+    }
+    
+    WatchpointSet* inflate()
+    {
+        if (LIKELY(isFat()))
+            return fat();
+        return inflateSlow();
+    }
+    
+    JS_EXPORT_PRIVATE WatchpointSet* inflateSlow();
+    JS_EXPORT_PRIVATE void freeFat();
+    
+    uintptr_t m_data;
+};
+
+} // namespace JSC
diff --git a/bytecompiler/BytecodeGenerator.cpp b/bytecompiler/BytecodeGenerator.cpp
new file mode 100644
index 0000000..ff782ef
--- /dev/null
+++ b/bytecompiler/BytecodeGenerator.cpp
@@ -0,0 +1,4955 @@
+/*
+ * Copyright (C) 2008-2009, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich 
+ * Copyright (C) 2012 Igalia, S.L.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeGenerator.h"
+
+#include "ArithProfile.h"
+#include "BuiltinExecutables.h"
+#include "BytecodeGeneratorification.h"
+#include "BytecodeLivenessAnalysis.h"
+#include "DefinePropertyAttributes.h"
+#include "Interpreter.h"
+#include "JSCInlines.h"
+#include "JSFunction.h"
+#include "JSGeneratorFunction.h"
+#include "JSLexicalEnvironment.h"
+#include "JSTemplateRegistryKey.h"
+#include "LowLevelInterpreter.h"
+#include "Options.h"
+#include "StackAlignment.h"
+#include "StrongInlines.h"
+#include "UnlinkedCodeBlock.h"
+#include "UnlinkedEvalCodeBlock.h"
+#include "UnlinkedFunctionCodeBlock.h"
+#include "UnlinkedInstructionStream.h"
+#include "UnlinkedModuleProgramCodeBlock.h"
+#include "UnlinkedProgramCodeBlock.h"
+#include 
+#include 
+#include 
+#include 
+#include 
+
+using namespace std;
+
+namespace JSC {
+
+void Label::setLocation(unsigned location)
+{
+    m_location = location;
+    
+    unsigned size = m_unresolvedJumps.size();
+    for (unsigned i = 0; i < size; ++i)
+        m_generator.instructions()[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first;
+}
+
+void Variable::dump(PrintStream& out) const
+{
+    out.print(
+        "{ident = ", m_ident,
+        ", offset = ", m_offset,
+        ", local = ", RawPointer(m_local),
+        ", attributes = ", m_attributes,
+        ", kind = ", m_kind,
+        ", symbolTableConstantIndex = ", m_symbolTableConstantIndex,
+        ", isLexicallyScoped = ", m_isLexicallyScoped, "}");
+}
+
+ParserError BytecodeGenerator::generate()
+{
+    m_codeBlock->setThisRegister(m_thisRegister.virtualRegister());
+
+    emitLogShadowChickenPrologueIfNecessary();
+    
+    // If we have declared a variable named "arguments" and we are using arguments then we should
+    // perform that assignment now.
+    if (m_needToInitializeArguments)
+        initializeVariable(variable(propertyNames().arguments), m_argumentsRegister);
+
+    if (m_restParameter)
+        m_restParameter->emit(*this);
+
+    {
+        RefPtr<RegisterID> temp = newTemporary();
+        RefPtr<RegisterID> globalScope;
+        for (auto functionPair : m_functionsToInitialize) {
+            FunctionMetadataNode* metadata = functionPair.first;
+            FunctionVariableType functionType = functionPair.second;
+            emitNewFunction(temp.get(), metadata);
+            if (functionType == NormalFunctionVariable)
+                initializeVariable(variable(metadata->ident()), temp.get());
+            else if (functionType == GlobalFunctionVariable) {
+                if (!globalScope) {
+                    // We know this will resolve to the global object because our parser/global initialization code 
+                    // doesn't allow let/const/class variables to have the same names as functions.
+                    RefPtr<RegisterID> globalObjectScope = emitResolveScope(nullptr, Variable(metadata->ident()));
+                    globalScope = newBlockScopeVariable(); 
+                    emitMove(globalScope.get(), globalObjectScope.get());
+                }
+                emitPutToScope(globalScope.get(), Variable(metadata->ident()), temp.get(), ThrowIfNotFound, InitializationMode::NotInitialization);
+            } else
+                RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+    
+    bool callingClassConstructor = constructorKind() != ConstructorKind::None && !isConstructor();
+    if (!callingClassConstructor)
+        m_scopeNode->emitBytecode(*this);
+
+    m_staticPropertyAnalyzer.kill();
+
+    for (unsigned i = 0; i < m_tryRanges.size(); ++i) {
+        TryRange& range = m_tryRanges[i];
+        int start = range.start->bind();
+        int end = range.end->bind();
+        
+        // This will happen for empty try blocks and for some cases of finally blocks:
+        //
+        // try {
+        //    try {
+        //    } finally {
+        //        return 42;
+        //        // *HERE*
+        //    }
+        // } finally {
+        //    print("things");
+        // }
+        //
+        // The return will pop scopes to execute the outer finally block. But this includes
+        // popping the try context for the inner try. The try context is live in the fall-through
+        // part of the finally block not because we will emit a handler that overlaps the finally,
+        // but because we haven't yet had a chance to plant the catch target. Then when we finish
+        // emitting code for the outer finally block, we repush the try context, this time with a
+        // new start index. But that means that the start index for the try range corresponding
+        // to the inner-finally-following-the-return (marked as "*HERE*" above) will be greater
+        // than the end index of the try block. This is harmless since end < start handlers will
+        // never get matched in our logic, but we do the runtime a favor and choose to not emit
+        // such handlers at all.
+        if (end <= start)
+            continue;
+        
+        ASSERT(range.tryData->handlerType != HandlerType::Illegal);
+        UnlinkedHandlerInfo info(static_cast<uint32_t>(start), static_cast<uint32_t>(end),
+            static_cast<uint32_t>(range.tryData->target->bind()), range.tryData->handlerType);
+        m_codeBlock->addExceptionHandler(info);
+    }
+    
+
+    if (isGeneratorOrAsyncFunctionBodyParseMode(m_codeBlock->parseMode()))
+        performGeneratorification(m_codeBlock.get(), m_instructions, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex);
+
+    m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions));
+
+    m_codeBlock->shrinkToFit();
+
+    if (m_expressionTooDeep)
+        return ParserError(ParserError::OutOfMemory);
+    return ParserError(ParserError::ErrorNone);
+}
+
+BytecodeGenerator::BytecodeGenerator(VM& vm, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, const VariableEnvironment* parentScopeTDZVariables)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+    , m_scopeNode(programNode)
+    , m_codeBlock(vm, codeBlock)
+    , m_thisRegister(CallFrame::thisArgumentOffset())
+    , m_codeType(GlobalCode)
+    , m_vm(&vm)
+    , m_needsToUpdateArrowFunctionContext(programNode->usesArrowFunction() || programNode->usesEval())
+{
+    ASSERT_UNUSED(parentScopeTDZVariables, !parentScopeTDZVariables->size());
+
+    for (auto& constantRegister : m_linkTimeConstantRegisters)
+        constantRegister = nullptr;
+
+    allocateCalleeSaveSpace();
+
+    m_codeBlock->setNumParameters(1); // Allocate space for "this"
+
+    emitEnter();
+
+    allocateAndEmitScope();
+
+    emitWatchdog();
+
+    const FunctionStack& functionStack = programNode->functionStack();
+
+    for (size_t i = 0; i < functionStack.size(); ++i) {
+        FunctionMetadataNode* function = functionStack[i];
+        m_functionsToInitialize.append(std::make_pair(function, GlobalFunctionVariable));
+    }
+    if (Options::validateBytecode()) {
+        for (auto& entry : programNode->varDeclarations())
+            RELEASE_ASSERT(entry.value.isVar());
+    }
+    codeBlock->setVariableDeclarations(programNode->varDeclarations());
+    codeBlock->setLexicalDeclarations(programNode->lexicalVariables());
+    // Even though this program may have lexical variables that go under TDZ, when linking the get_from_scope/put_to_scope
+    // operations we emit, we will have ResolveTypes that implicitly do TDZ checks. Therefore, we don't need
+    // additional TDZ checks on top of those. This is why we can omit pushing programNode->lexicalVariables()
+    // to the TDZ stack.
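+    // (For example, a top-level `x; let x = 1;` must throw a ReferenceError; the TDZ check that
+    // enforces this comes from the ResolveType chosen when that get_from_scope is linked.)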
+    
+    if (needsToUpdateArrowFunctionContext()) {
+        initializeArrowFunctionContextScopeIfNeeded();
+        emitPutThisToArrowFunctionContextScope();
+    }
+}
+
+BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, const VariableEnvironment* parentScopeTDZVariables)
+    : m_shouldEmitDebugHooks(Options::forceDebuggerBytecodeGeneration() || debuggerMode == DebuggerOn)
+    , m_scopeNode(functionNode)
+    , m_codeBlock(vm, codeBlock)
+    , m_codeType(FunctionCode)
+    , m_vm(&vm)
+    , m_isBuiltinFunction(codeBlock->isBuiltinFunction())
+    , m_usesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode())
+    // FIXME: We should be able to have tail call elimination with the profiler
+    // enabled. This is currently not possible because the profiler expects
+    // op_will_call / op_did_call pairs before and after a call, which are not
+    // compatible with tail calls (we have no way of emitting op_did_call).
+    // https://bugs.webkit.org/show_bug.cgi?id=148819
+    , m_inTailPosition(Options::useTailCalls() && !isConstructor() && constructorKind() == ConstructorKind::None && isStrictMode())
+    , m_needsToUpdateArrowFunctionContext(functionNode->usesArrowFunction() || functionNode->usesEval())
+    , m_derivedContextType(codeBlock->derivedContextType())
+{
+    for (auto& constantRegister : m_linkTimeConstantRegisters)
+        constantRegister = nullptr;
+
+    if (m_isBuiltinFunction)
+        m_shouldEmitDebugHooks = false;
+
+    allocateCalleeSaveSpace();
+    
+    SymbolTable* functionSymbolTable = SymbolTable::create(*m_vm);
+    functionSymbolTable->setUsesNonStrictEval(m_usesNonStrictEval);
+    int symbolTableConstantIndex = 0;
+
+    FunctionParameters& parameters = *functionNode->parameters(); 
+    // http://www.ecma-international.org/ecma-262/6.0/index.html#sec-functiondeclarationinstantiation
+    // This implements IsSimpleParameterList in the Ecma 2015 spec.
+    // If IsSimpleParameterList is false, we will create a strict-mode like arguments object.
+    // IsSimpleParameterList is false if the argument list contains any default parameter values,
+    // a rest parameter, or any destructuring patterns.
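+    // For example, `function f(a, b)` has a simple parameter list, while `function f(a = 1)`,
+    // `function f(...rest)` and `function f({ x })` do not.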
+    bool isSimpleParameterList = true;
+    // If we do have default parameters, destructuring parameters, or a rest parameter, our parameters will be allocated in a different scope.
+    for (size_t i = 0; i < parameters.size(); i++) {
+        std::pair<DestructuringPatternNode*, ExpressionNode*> parameter = parameters.at(i);
+        bool hasDefaultParameterValue = !!parameter.second;
+        auto pattern = parameter.first;
+        bool isSimpleParameter = !hasDefaultParameterValue && pattern->isBindingNode();
+        isSimpleParameterList &= isSimpleParameter;
+    }
+
+    SourceParseMode parseMode = codeBlock->parseMode();
+
+    bool containsArrowOrEvalButNotInArrowBlock = ((functionNode->usesArrowFunction() && functionNode->doAnyInnerArrowFunctionsUseAnyFeature()) || functionNode->usesEval()) && !m_codeBlock->isArrowFunction();
+    bool shouldCaptureSomeOfTheThings = m_shouldEmitDebugHooks || functionNode->needsActivation() || containsArrowOrEvalButNotInArrowBlock;
+
+    bool shouldCaptureAllOfTheThings = m_shouldEmitDebugHooks || codeBlock->usesEval();
+    bool needsArguments = (functionNode->usesArguments() || codeBlock->usesEval() || (functionNode->usesArrowFunction() && !codeBlock->isArrowFunction() && isArgumentsUsedInInnerArrowFunction()));
+
+    if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode)) {
+        // Generator and AsyncFunction never provide "arguments". An "arguments" reference will be resolved in an upper generator function scope.
+        needsArguments = false;
+
+        // Generator and AsyncFunction use the var scope to save and resume their variables, so the lexical scope is always instantiated.
+        shouldCaptureSomeOfTheThings = true;
+    }
+
+    if (isGeneratorOrAsyncFunctionWrapperParseMode(parseMode) && needsArguments) {
+        // A generator does not provide "arguments" itself; instead, the wrapping GeneratorFunction provides "arguments".
+        // This is because the arguments of a generator should be evaluated before starting it.
+        // To work around this, we evaluate these arguments as arguments of the wrapping generator function, and reference them from the generator.
+        //
+        //    function *gen(a, b = hello())
+        //    {
+        //        return {
+        //            @generatorNext: function (@generator, @generatorState, @generatorValue, @generatorResumeMode, @generatorFrame)
+        //            {
+        //                arguments;  // This `arguments` should refer to the gen's arguments.
+        //                ...
+        //            }
+        //        }
+        //    }
+        shouldCaptureSomeOfTheThings = true;
+    }
+
+    if (shouldCaptureAllOfTheThings)
+        functionNode->varDeclarations().markAllVariablesAsCaptured();
+    
+    auto captures = [&] (UniquedStringImpl* uid) -> bool {
+        if (!shouldCaptureSomeOfTheThings)
+            return false;
+        if (needsArguments && uid == propertyNames().arguments.impl()) {
+            // Actually, we only need to capture the arguments object when we "need full activation"
+            // because of name scopes. But historically we did it this way, so for now we just preserve
+            // the old behavior.
+            // FIXME: https://bugs.webkit.org/show_bug.cgi?id=143072
+            return true;
+        }
+        return functionNode->captures(uid);
+    };
+    auto varKind = [&] (UniquedStringImpl* uid) -> VarKind {
+        return captures(uid) ? VarKind::Scope : VarKind::Stack;
+    };
+
+    m_calleeRegister.setIndex(CallFrameSlot::callee);
+
+    initializeParameters(parameters);
+    ASSERT(!(isSimpleParameterList && m_restParameter));
+
+    emitEnter();
+
+    if (isGeneratorOrAsyncFunctionBodyParseMode(parseMode))
+        m_generatorRegister = &m_parameters[1];
+
+    allocateAndEmitScope();
+
+    emitWatchdog();
+    
+    if (functionNameIsInScope(functionNode->ident(), functionNode->functionMode())) {
+        ASSERT(parseMode != SourceParseMode::GeneratorBodyMode);
+        ASSERT(!isAsyncFunctionBodyParseMode(parseMode));
+        bool isDynamicScope = functionNameScopeIsDynamic(codeBlock->usesEval(), codeBlock->isStrictMode());
+        bool isFunctionNameCaptured = captures(functionNode->ident().impl());
+        bool markAsCaptured = isDynamicScope || isFunctionNameCaptured;
+        emitPushFunctionNameScope(functionNode->ident(), &m_calleeRegister, markAsCaptured);
+    }
+
+    if (shouldCaptureSomeOfTheThings)
+        m_lexicalEnvironmentRegister = addVar();
+
+    if (shouldCaptureSomeOfTheThings || vm.typeProfiler())
+        symbolTableConstantIndex = addConstantValue(functionSymbolTable)->index();
+
+    // We can allocate the "var" environment if we don't have default parameter expressions. If we have
+    // default parameter expressions, we have to hold off on allocating the "var" environment because
+    // the parent scope of the "var" environment is the parameter environment.
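+    // For example, in `function f(a = 1) { var x; }` the parameter `a` lives in its own parameter
+    // environment, and the "var" environment holding `x` is created afterwards as its child.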
+    if (isSimpleParameterList)
+        initializeVarLexicalEnvironment(symbolTableConstantIndex, functionSymbolTable, shouldCaptureSomeOfTheThings);
+
+    // Figure out some interesting facts about our arguments.
+    bool capturesAnyArgumentByName = false;
+    if (functionNode->hasCapturedVariables()) {
+        FunctionParameters& parameters = *functionNode->parameters();
+        for (size_t i = 0; i < parameters.size(); ++i) {
+            auto pattern = parameters.at(i).first;
+            if (!pattern->isBindingNode())
+                continue;
+            const Identifier& ident = static_cast<const BindingNode*>(pattern)->boundProperty();
+            capturesAnyArgumentByName |= captures(ident.impl());
+        }
+    }
+    
+    if (capturesAnyArgumentByName)
+        ASSERT(m_lexicalEnvironmentRegister);
+
+    // Need to know what our functions are called. Parameters have some goofy behaviors when it
+    // comes to functions of the same name.
+    for (FunctionMetadataNode* function : functionNode->functionStack())
+        m_functions.add(function->ident().impl());
+    
+    if (needsArguments) {
+        // Create the arguments object now. We may put the arguments object into the activation if
+        // it is captured. Either way, we create two arguments object variables: one is our
+        // private variable that is immutable, and another that is the user-visible variable. The
+        // immutable one is only used here, or during formal parameter resolutions if we opt for
+        // DirectArguments.
+        
+        m_argumentsRegister = addVar();
+        m_argumentsRegister->ref();
+    }
+    
+    if (needsArguments && !codeBlock->isStrictMode() && isSimpleParameterList) {
+        // If we captured any formal parameter by name, then we use ScopedArguments. Otherwise we
+        // use DirectArguments. With ScopedArguments, we lift all of our arguments into the
+        // activation.
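+        // For example (sketch): in `function f(a) { arguments[0]; return () => a; }` the parameter
+        // `a` is captured by the arrow function, so we use ScopedArguments; in
+        // `function f(a) { return arguments[0]; }` nothing captures `a`, so DirectArguments suffices.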
+        
+        if (capturesAnyArgumentByName) {
+            functionSymbolTable->setArgumentsLength(vm, parameters.size());
+            
+            // For each parameter, we have two possibilities:
+            // Either it's a binding node with no function overlap, in which case it gets a name
+            // in the symbol table - or it just gets space reserved in the symbol table. Either
+            // way we lift the value into the scope.
+            for (unsigned i = 0; i < parameters.size(); ++i) {
+                ScopeOffset offset = functionSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+                functionSymbolTable->setArgumentOffset(vm, i, offset);
+                if (UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first)) {
+                    VarOffset varOffset(offset);
+                    SymbolTableEntry entry(varOffset);
+                    // Stores to these variables via the ScopedArguments object will not do
+                    // notifyWrite(), since that would be cumbersome. Also, watching formal
+                    // parameters when "arguments" is in play is unlikely to be super profitable.
+                    // So, we just disable it.
+                    entry.disableWatching(*m_vm);
+                    functionSymbolTable->set(NoLockingNecessary, name, entry);
+                }
+                emitOpcode(op_put_to_scope);
+                instructions().append(m_lexicalEnvironmentRegister->index());
+                instructions().append(UINT_MAX);
+                instructions().append(virtualRegisterForArgument(1 + i).offset());
+                instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand());
+                instructions().append(symbolTableConstantIndex);
+                instructions().append(offset.offset());
+            }
+            
+            // This creates a scoped arguments object and copies the overflow arguments into the
+            // scope. It's the equivalent of calling ScopedArguments::createByCopying().
+            emitOpcode(op_create_scoped_arguments);
+            instructions().append(m_argumentsRegister->index());
+            instructions().append(m_lexicalEnvironmentRegister->index());
+        } else {
+            // We're going to put all parameters into the DirectArguments object. First ensure
+            // that the symbol table knows that this is happening.
+            for (unsigned i = 0; i < parameters.size(); ++i) {
+                if (UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first))
+                    functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(DirectArgumentsOffset(i))));
+            }
+            
+            emitOpcode(op_create_direct_arguments);
+            instructions().append(m_argumentsRegister->index());
+        }
+    } else if (isSimpleParameterList) {
+        // Create the formal parameters the normal way. Any of them could be captured, or not. If
+        // captured, lift them into the scope. We cannot do this if we have default parameter expressions
+        // because when default parameter expressions exist, they belong in their own lexical environment
+        // separate from the "var" lexical environment.
+        for (unsigned i = 0; i < parameters.size(); ++i) {
+            UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first);
+            if (!name)
+                continue;
+            
+            if (!captures(name)) {
+                // This is the easy case - just tell the symbol table about the argument. It will
+                // be accessed directly.
+                functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(virtualRegisterForArgument(1 + i))));
+                continue;
+            }
+            
+            ScopeOffset offset = functionSymbolTable->takeNextScopeOffset(NoLockingNecessary);
+            const Identifier& ident =
+                static_cast<const BindingNode*>(parameters.at(i).first)->boundProperty();
+            functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(offset)));
+            
+            emitOpcode(op_put_to_scope);
+            instructions().append(m_lexicalEnvironmentRegister->index());
+            instructions().append(addConstant(ident));
+            instructions().append(virtualRegisterForArgument(1 + i).offset());
+            instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand());
+            instructions().append(symbolTableConstantIndex);
+            instructions().append(offset.offset());
+        }
+    }
+    
+    if (needsArguments && (codeBlock->isStrictMode() || !isSimpleParameterList)) {
+        // Allocate a cloned arguments object.
+        emitOpcode(op_create_cloned_arguments);
+        instructions().append(m_argumentsRegister->index());
+    }
+    
+    // There are some variables that need to be preinitialized to something other than Undefined:
+    //
+    // - "arguments": unless it's used as a function or parameter, this should refer to the
+    //   arguments object.
+    //
+    // - functions: these always override everything else.
+    //
+    // The most logical way to do all of this is to initialize none of the variables until now,
+    // and then initialize them in BytecodeGenerator::generate() in such an order that the rules
+    // for how these things override each other end up holding. We would initialize "arguments" first, 
+    // then all arguments, then the functions.
+    //
+    // But some arguments are already initialized by default, since if they aren't captured and we
+    // don't have "arguments" then we just point the symbol table at the stack slot of those
+    // arguments. We end up initializing the rest of the arguments that have an uncomplicated
+    // binding (i.e. don't involve destructuring) above when figuring out how to lay them out,
+    // because that's just the simplest thing. This means that when we initialize them, we have to
+    // watch out for the things that override arguments (namely, functions).
+    
+    // This is our final act of weirdness. "arguments" is overridden by everything except the
+    // callee. We add it to the symbol table if it's not already there and it's not an argument.
+    bool shouldCreateArgumentsVariableInParameterScope = false;
+    if (needsArguments) {
+        // If "arguments" is overridden by a function or destructuring parameter name, then it's
+        // OK for us to call createVariable() because it won't change anything. It's also OK for
+        // us to then tell BytecodeGenerator::generate() to write to it because it will do so
+        // before it initializes functions and destructuring parameters. But if "arguments" is
+        // overridden by a "simple" function parameter, then we have to bail: createVariable()
+        // would assert and BytecodeGenerator::generate() would write the "arguments" after the
+        // argument value had already been properly initialized.
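+        // For example, given `function f(arguments) { return arguments; }`, the simple parameter
+        // named "arguments" must win, so we must not create an "arguments" variable that
+        // BytecodeGenerator::generate() would later clobber.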
+        
+        bool haveParameterNamedArguments = false;
+        for (unsigned i = 0; i < parameters.size(); ++i) {
+            UniquedStringImpl* name = visibleNameForParameter(parameters.at(i).first);
+            if (name == propertyNames().arguments.impl()) {
+                haveParameterNamedArguments = true;
+                break;
+            }
+        }
+
+        bool shouldCreateArgumentsVariable = !haveParameterNamedArguments
+            && !SourceParseModeSet(SourceParseMode::ArrowFunctionMode, SourceParseMode::AsyncArrowFunctionMode).contains(m_codeBlock->parseMode());
+        shouldCreateArgumentsVariableInParameterScope = shouldCreateArgumentsVariable && !isSimpleParameterList;
+        // Do not create the arguments variable for arrow functions; the value will be loaded from the parent scope.
+        if (shouldCreateArgumentsVariable && !shouldCreateArgumentsVariableInParameterScope) {
+            createVariable(
+                propertyNames().arguments, varKind(propertyNames().arguments.impl()), functionSymbolTable);
+
+            m_needToInitializeArguments = true;
+        }
+    }
+
+    for (FunctionMetadataNode* function : functionNode->functionStack()) {
+        const Identifier& ident = function->ident();
+        createVariable(ident, varKind(ident.impl()), functionSymbolTable);
+        m_functionsToInitialize.append(std::make_pair(function, NormalFunctionVariable));
+    }
+    for (auto& entry : functionNode->varDeclarations()) {
+        ASSERT(!entry.value.isLet() && !entry.value.isConst());
+        if (!entry.value.isVar()) // This is either a parameter or callee.
+            continue;
+        if (shouldCreateArgumentsVariableInParameterScope && entry.key.get() == propertyNames().arguments.impl())
+            continue;
+        createVariable(Identifier::fromUid(m_vm, entry.key.get()), varKind(entry.key.get()), functionSymbolTable, IgnoreExisting);
+    }
+
+
+    m_newTargetRegister = addVar();
+    switch (parseMode) {
+    case SourceParseMode::GeneratorWrapperFunctionMode: {
+        m_generatorRegister = addVar();
+
+        // FIXME: Emit to_this only when Generator uses it.
+        // https://bugs.webkit.org/show_bug.cgi?id=151586
+        m_codeBlock->addPropertyAccessInstruction(instructions().size());
+        emitOpcode(op_to_this);
+        instructions().append(kill(&m_thisRegister));
+        instructions().append(0);
+        instructions().append(0);
+
+        emitMove(m_generatorRegister, &m_calleeRegister);
+        emitCreateThis(m_generatorRegister);
+        break;
+    }
+
+    case SourceParseMode::AsyncArrowFunctionMode:
+    case SourceParseMode::AsyncMethodMode:
+    case SourceParseMode::AsyncFunctionMode: {
+        ASSERT(!isConstructor());
+        ASSERT(constructorKind() == ConstructorKind::None);
+        m_generatorRegister = addVar();
+        m_promiseCapabilityRegister = addVar();
+
+        if (parseMode != SourceParseMode::AsyncArrowFunctionMode) {
+            // FIXME: Emit to_this only when AsyncFunctionBody uses it.
+            // https://bugs.webkit.org/show_bug.cgi?id=151586
+            m_codeBlock->addPropertyAccessInstruction(instructions().size());
+            emitOpcode(op_to_this);
+            instructions().append(kill(&m_thisRegister));
+            instructions().append(0);
+            instructions().append(0);
+        }
+
+        emitNewObject(m_generatorRegister);
+
+        // let promiseCapability be @newPromiseCapability(@Promise)
+        auto varNewPromiseCapability = variable(propertyNames().builtinNames().newPromiseCapabilityPrivateName());
+        RefPtr<RegisterID> scope = newTemporary();
+        moveToDestinationIfNeeded(scope.get(), emitResolveScope(scope.get(), varNewPromiseCapability));
+        RefPtr<RegisterID> newPromiseCapability = emitGetFromScope(newTemporary(), scope.get(), varNewPromiseCapability, ThrowIfNotFound);
+
+        CallArguments args(*this, nullptr, 1);
+        emitLoad(args.thisRegister(), jsUndefined());
+
+        auto varPromiseConstructor = variable(propertyNames().builtinNames().PromisePrivateName());
+        moveToDestinationIfNeeded(scope.get(), emitResolveScope(scope.get(), varPromiseConstructor));
+        emitGetFromScope(args.argumentRegister(0), scope.get(), varPromiseConstructor, ThrowIfNotFound);
+
+        // JSTextPosition(int _line, int _offset, int _lineStartOffset)
+        JSTextPosition divot(m_scopeNode->firstLine(), m_scopeNode->startOffset(), m_scopeNode->lineStartOffset());
+        emitCall(promiseCapabilityRegister(), newPromiseCapability.get(), NoExpectedFunction, args, divot, divot, divot, DebuggableCall::No);
+        break;
+    }
+
+    case SourceParseMode::AsyncFunctionBodyMode:
+    case SourceParseMode::AsyncArrowFunctionBodyMode:
+    case SourceParseMode::GeneratorBodyMode: {
+        // |this| is already filled correctly before here.
+        emitLoad(m_newTargetRegister, jsUndefined());
+        break;
+    }
+
+    default: {
+        if (SourceParseMode::ArrowFunctionMode != parseMode) {
+            if (isConstructor()) {
+                emitMove(m_newTargetRegister, &m_thisRegister);
+                if (constructorKind() == ConstructorKind::Extends) {
+                    RefPtr